27 Commits

Author SHA1 Message Date
narawat lamaiin
a01a91e7b9 update 2025-05-01 12:05:59 +07:00
narawat lamaiin
aa8436c0ed update 2025-05-01 08:04:01 +07:00
narawat lamaiin
cccad676db update 2025-05-01 07:59:37 +07:00
narawat lamaiin
03de659c9b update companion 2025-04-30 12:58:32 +07:00
narawat lamaiin
affb96f0cf update 2025-04-29 18:45:52 +07:00
narawat lamaiin
f19f302bd9 update 2025-04-29 11:01:36 +07:00
narawat lamaiin
7ca4f5276d update 2025-04-26 06:20:09 +07:00
narawat lamaiin
44804041a3 update 2025-04-25 21:12:27 +07:00
narawat lamaiin
48a3704f6d update 2025-04-13 21:46:54 +07:00
8321a13afc update 2025-04-04 15:23:34 +07:00
b26ae31d4c mark new version 2025-04-04 15:23:11 +07:00
ton
b397bf7bdb Merge pull request 'v0.1.4' (#3) from v0.1.4 into main
Reviewed-on: #3
2025-04-04 08:14:57 +00:00
narawat lamaiin
c0edf7dadf update 2025-04-04 15:04:02 +07:00
narawat lamaiin
c21f943b12 update 2025-04-01 21:17:15 +07:00
narawat lamaiin
b8fd772a28 update 2025-03-31 21:30:14 +07:00
narawat lamaiin
883f581b2a update 2025-03-22 15:34:00 +07:00
narawat lamaiin
5a890860a6 update 2025-03-22 09:42:51 +07:00
7d5bc14a09 mark new version 2025-03-21 10:13:53 +07:00
ton
37ba3a9d31 Merge pull request 'v0.1.3-dev' (#2) from v0.1.3-dev into main
Reviewed-on: #2
2025-03-21 03:09:16 +00:00
bfadd53033 update 2025-03-21 10:03:08 +07:00
8fc3afe348 update 2025-03-20 16:15:38 +07:00
c60037226a update 2025-03-13 19:11:20 +07:00
narawat lamaiin
db6c9c5f2b update 2025-03-07 13:34:15 +07:00
narawat lamaiin
6504099959 update 2025-01-31 09:50:44 +07:00
724b092bdb update 2025-01-30 21:28:49 +07:00
c56c3d02b0 update 2025-01-29 12:16:01 +07:00
ton
a7f3e29e9c Merge pull request 'WIP v0.1.2-dev' (#1) from v0.1.2-dev into main
Reviewed-on: #1
2025-01-25 07:30:18 +00:00
13 changed files with 2238 additions and 1694 deletions

View File

@@ -1,8 +1,8 @@
# This file is machine-generated - editing it directly is not advised # This file is machine-generated - editing it directly is not advised
julia_version = "1.11.2" julia_version = "1.11.4"
manifest_format = "2.0" manifest_format = "2.0"
project_hash = "b483014657ef9f0fde60d7258585b291d6f0eeca" project_hash = "cb7f3c57318e927e8ac4dc2dea9acdcace566ed1"
[[deps.AliasTables]] [[deps.AliasTables]]
deps = ["PtrArrays", "Random"] deps = ["PtrArrays", "Random"]
@@ -120,9 +120,9 @@ version = "1.11.0"
[[deps.Distributions]] [[deps.Distributions]]
deps = ["AliasTables", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns"] deps = ["AliasTables", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns"]
git-tree-sha1 = "3101c32aab536e7a27b1763c0797dba151b899ad" git-tree-sha1 = "0b4190661e8a4e51a842070e7dd4fae440ddb7f4"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f" uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.113" version = "0.25.118"
[deps.Distributions.extensions] [deps.Distributions.extensions]
DistributionsChainRulesCoreExt = "ChainRulesCore" DistributionsChainRulesCoreExt = "ChainRulesCore"
@@ -158,9 +158,9 @@ version = "0.1.10"
[[deps.FileIO]] [[deps.FileIO]]
deps = ["Pkg", "Requires", "UUIDs"] deps = ["Pkg", "Requires", "UUIDs"]
git-tree-sha1 = "2dd20384bf8c6d411b5c7370865b1e9b26cb2ea3" git-tree-sha1 = "b66970a70db13f45b7e57fbda1736e1cf72174ea"
uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
version = "1.16.6" version = "1.17.0"
weakdeps = ["HTTP"] weakdeps = ["HTTP"]
[deps.FileIO.extensions] [deps.FileIO.extensions]
@@ -168,9 +168,9 @@ weakdeps = ["HTTP"]
[[deps.FilePathsBase]] [[deps.FilePathsBase]]
deps = ["Compat", "Dates"] deps = ["Compat", "Dates"]
git-tree-sha1 = "7878ff7172a8e6beedd1dea14bd27c3c6340d361" git-tree-sha1 = "3bab2c5aa25e7840a4b065805c0cdfc01f3068d2"
uuid = "48062228-2e41-5def-b9a4-89aafe57970f" uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
version = "0.9.22" version = "0.9.24"
weakdeps = ["Mmap", "Test"] weakdeps = ["Mmap", "Test"]
[deps.FilePathsBase.extensions] [deps.FilePathsBase.extensions]
@@ -200,11 +200,9 @@ version = "1.11.0"
[[deps.GeneralUtils]] [[deps.GeneralUtils]]
deps = ["CSV", "DataFrames", "DataStructures", "Dates", "Distributions", "JSON3", "MQTTClient", "PrettyPrinting", "Random", "SHA", "UUIDs"] deps = ["CSV", "DataFrames", "DataStructures", "Dates", "Distributions", "JSON3", "MQTTClient", "PrettyPrinting", "Random", "SHA", "UUIDs"]
git-tree-sha1 = "978d9a5c3fc30205dd72d4a2a2ed4fa85ebee5cf" path = "/appfolder/app/dev/GeneralUtils"
repo-rev = "main"
repo-url = "https://git.yiem.cc/ton/GeneralUtils"
uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe" uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
version = "0.1.0" version = "0.2.3"
[[deps.HTTP]] [[deps.HTTP]]
deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "PrecompileTools", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"] deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "PrecompileTools", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"]
@@ -214,9 +212,9 @@ version = "1.10.13"
[[deps.HypergeometricFunctions]] [[deps.HypergeometricFunctions]]
deps = ["LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"] deps = ["LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
git-tree-sha1 = "b1c2585431c382e3fe5805874bda6aea90a95de9" git-tree-sha1 = "68c173f4f449de5b438ee67ed0c9c748dc31a2ec"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a" uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.25" version = "0.3.28"
[[deps.ICU_jll]] [[deps.ICU_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
@@ -260,9 +258,9 @@ uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f"
version = "1.3.0" version = "1.3.0"
[[deps.IrrationalConstants]] [[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2" git-tree-sha1 = "e2222959fbc6c19554dc15174c81bf7bf3aa691c"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2" version = "0.2.4"
[[deps.IterTools]] [[deps.IterTools]]
git-tree-sha1 = "42d5f897009e7ff2cf88db414a389e5ed1bdd023" git-tree-sha1 = "42d5f897009e7ff2cf88db414a389e5ed1bdd023"
@@ -305,12 +303,10 @@ uuid = "b39eb1a6-c29a-53d7-8c32-632cd16f18da"
version = "1.19.3+0" version = "1.19.3+0"
[[deps.LLMMCTS]] [[deps.LLMMCTS]]
deps = ["GeneralUtils", "JSON3"] deps = ["GeneralUtils", "JSON3", "PrettyPrinting"]
git-tree-sha1 = "d8c653b8fafbd3757b7332985efaf1fdb8b6fe97" path = "/appfolder/app/dev/LLMMCTS"
repo-rev = "main"
repo-url = "https://git.yiem.cc/ton/LLMMCTS"
uuid = "d76c5a4d-449e-4835-8cc4-dd86ec44f241" uuid = "d76c5a4d-449e-4835-8cc4-dd86ec44f241"
version = "0.1.2" version = "0.1.4"
[[deps.LaTeXStrings]] [[deps.LaTeXStrings]]
git-tree-sha1 = "dda21b8cbd6a6c40d9d02a73230f9d70fed6918c" git-tree-sha1 = "dda21b8cbd6a6c40d9d02a73230f9d70fed6918c"
@@ -370,9 +366,9 @@ version = "1.11.0"
[[deps.LogExpFunctions]] [[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"] deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "a2d09619db4e765091ee5c6ffe8872849de0feea" git-tree-sha1 = "13ca9e2586b89836fd20cccf56e57e2b9ae7f38f"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.28" version = "0.3.29"
[deps.LogExpFunctions.extensions] [deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore" LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
@@ -475,7 +471,7 @@ version = "0.3.27+1"
[[deps.OpenLibm_jll]] [[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"] deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112" uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+2" version = "0.8.1+4"
[[deps.OpenSSL]] [[deps.OpenSSL]]
deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"] deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"]
@@ -493,7 +489,7 @@ version = "3.0.15+1"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0" version = "0.5.5+2"
[[deps.OrderedCollections]] [[deps.OrderedCollections]]
git-tree-sha1 = "12f1439c4f986bb868acda6ea33ebc78e19b95ad" git-tree-sha1 = "12f1439c4f986bb868acda6ea33ebc78e19b95ad"
@@ -502,9 +498,9 @@ version = "1.7.0"
[[deps.PDMats]] [[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "949347156c25054de2db3b166c52ac4728cbad65" git-tree-sha1 = "48566789a6d5f6492688279e22445002d171cf76"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150" uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.31" version = "0.11.33"
[[deps.Parsers]] [[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"] deps = ["Dates", "PrecompileTools", "UUIDs"]
@@ -556,15 +552,15 @@ uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
version = "1.11.0" version = "1.11.0"
[[deps.PtrArrays]] [[deps.PtrArrays]]
git-tree-sha1 = "77a42d78b6a92df47ab37e177b2deac405e1c88f" git-tree-sha1 = "1d36ef11a9aaf1e8b74dacc6a731dd1de8fd493d"
uuid = "43287f4e-b6f4-7ad1-bb20-aadabca52c3d" uuid = "43287f4e-b6f4-7ad1-bb20-aadabca52c3d"
version = "1.2.1" version = "1.3.0"
[[deps.QuadGK]] [[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"] deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "cda3b045cf9ef07a08ad46731f5a3165e56cf3da" git-tree-sha1 = "9da16da70037ba9d701192e27befedefb91ec284"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.11.1" version = "2.11.2"
[deps.QuadGK.extensions] [deps.QuadGK.extensions]
QuadGKEnzymeExt = "Enzyme" QuadGKEnzymeExt = "Enzyme"
@@ -623,11 +619,9 @@ version = "0.7.0"
[[deps.SQLLLM]] [[deps.SQLLLM]]
deps = ["CSV", "DataFrames", "DataStructures", "Dates", "FileIO", "GeneralUtils", "HTTP", "JSON3", "LLMMCTS", "LibPQ", "PrettyPrinting", "Random", "Revise", "StatsBase", "Tables", "URIs", "UUIDs"] deps = ["CSV", "DataFrames", "DataStructures", "Dates", "FileIO", "GeneralUtils", "HTTP", "JSON3", "LLMMCTS", "LibPQ", "PrettyPrinting", "Random", "Revise", "StatsBase", "Tables", "URIs", "UUIDs"]
git-tree-sha1 = "45e660e44de0950a5e5f92d467298d8b768b6023" path = "/appfolder/app/dev/SQLLLM"
repo-rev = "main"
repo-url = "https://git.yiem.cc/ton/SQLLLM"
uuid = "2ebc79c7-cc10-4a3a-9665-d2e1d61e63d3" uuid = "2ebc79c7-cc10-4a3a-9665-d2e1d61e63d3"
version = "0.2.0" version = "0.2.4"
[[deps.SQLStrings]] [[deps.SQLStrings]]
git-tree-sha1 = "55de0530689832b1d3d43491ee6b67bd54d3323c" git-tree-sha1 = "55de0530689832b1d3d43491ee6b67bd54d3323c"
@@ -672,9 +666,9 @@ version = "1.11.0"
[[deps.SpecialFunctions]] [[deps.SpecialFunctions]]
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "2f5d4697f21388cbe1ff299430dd169ef97d7e14" git-tree-sha1 = "64cca0c26b4f31ba18f13f6c12af7c85f478cfde"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b" uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.4.0" version = "2.5.0"
[deps.SpecialFunctions.extensions] [deps.SpecialFunctions.extensions]
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore" SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
@@ -699,16 +693,16 @@ uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.7.0" version = "1.7.0"
[[deps.StatsBase]] [[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"] deps = ["AliasTables", "DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "5cf7606d6cef84b543b483848d4ae08ad9832b21" git-tree-sha1 = "29321314c920c26684834965ec2ce0dacc9cf8e5"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91" uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.34.3" version = "0.34.4"
[[deps.StatsFuns]] [[deps.StatsFuns]]
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"] deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "b423576adc27097764a90e163157bcfc9acf0f46" git-tree-sha1 = "35b09e80be285516e52c9054792c884b9216ae3c"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c" uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "1.3.2" version = "1.4.0"
[deps.StatsFuns.extensions] [deps.StatsFuns.extensions]
StatsFunsChainRulesCoreExt = "ChainRulesCore" StatsFunsChainRulesCoreExt = "ChainRulesCore"

View File

@@ -1,9 +1,10 @@
name = "YiemAgent" name = "YiemAgent"
uuid = "e012c34b-7f78-48e0-971c-7abb83b6f0a2" uuid = "e012c34b-7f78-48e0-971c-7abb83b6f0a2"
authors = ["narawat lamaiin <narawat@outlook.com>"] authors = ["narawat lamaiin <narawat@outlook.com>"]
version = "0.1.2" version = "0.2.0"
[deps] [deps]
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0" DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a" Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
@@ -21,7 +22,5 @@ URIs = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[compat] [compat]
CSV = "0.10.15"
DataFrames = "1.7.0" DataFrames = "1.7.0"
GeneralUtils = "0.1, 0.2"
LLMMCTS = "0.1.2"
SQLLLM = "0.2.0"

View File

@@ -0,0 +1,72 @@
To make **LLM-driven inference** fast while maintaining its dynamic capabilities, there are a few practices or approaches to avoid, as they could lead to performance bottlenecks or inefficiencies. Here's what *not* to do:
---
### **1. Avoid Using Overly Large Models for Every Query**
While larger LLMs like GPT-4 provide high accuracy and nuanced responses, they may slow down real-time processing due to their computational complexity. Instead:
- Use distilled or smaller models (e.g., GPT-3.5 Turbo or fine-tuned versions) for faster inference without compromising much on quality.
---
### **2. Avoid Excessive Entity Preprocessing**
Don't rely on overly complicated preprocessing steps (like advanced NER models or regex-heavy pipelines) to extract entities from the query before invoking the LLM. This could add latency. Instead:
- Design efficient prompts that allow the LLM to extract entities and generate responses simultaneously.
---
### **3. Avoid Asking the LLM Multiple Separate Questions**
Running the LLM for multiple subtasks—for example, entity extraction first and response generation second—can significantly slow down the pipeline. Instead:
- Create prompts that combine tasks into one pass, e.g., *"Identify the city name and generate a weather response for this query: 'What's the weather in London?'"*.
---
### **4. Don't Overload the LLM with Context History**
Excessively lengthy conversation history or irrelevant context in your prompts can slow down inference times. Instead:
- Provide only the relevant context for each query, trimming unnecessary parts of the conversation.
---
### **5. Avoid Real-Time Dependence on External APIs**
Using external APIs to fetch supplementary data (e.g., weather details or location info) during every query can introduce latency. Instead:
- Pre-fetch API data asynchronously and use the LLM to integrate it dynamically into responses.
---
### **6. Avoid Running LLM on Underpowered Hardware**
Running inference on CPUs or low-spec GPUs will result in slower response times. Instead:
- Deploy the LLM on optimized infrastructure (e.g., high-performance GPUs like NVIDIA A100 or cloud platforms like Azure AI) to reduce latency.
---
### **7. Skip Lengthy Generative Prompts**
Avoid prompts that encourage the LLM to produce overly detailed or verbose responses, as these take longer to process. Instead:
- Use concise prompts that focus on generating actionable or succinct answers.
---
### **8. Don't Ignore Optimization Techniques**
Failing to optimize your LLM setup can drastically impact performance. For example:
- Avoid skipping techniques like model quantization (reducing numerical precision to speed up inference) or distillation (training smaller models).
---
### **9. Don't Neglect Response Caching**
While you may not want a full caching system to avoid sunk costs, dismissing lightweight caching entirely can impact speed. Instead:
- Use temporary session-based caching for very frequent queries, without committing to a full-fledged cache infrastructure.
---
### **10. Avoid One-Size-Fits-All Solutions**
Applying the same LLM inference method to all queries—whether simple or complex—will waste processing resources. Instead:
- Route basic queries to faster, specialized models and use the LLM for nuanced or multi-step queries only.
---
### Summary: Focus on Efficient Design
By avoiding these pitfalls, you can ensure that LLM-driven inference remains fast and responsive:
- Optimize prompts.
- Use smaller models for simpler queries.
- Run the LLM on high-performance hardware.
- Trim unnecessary preprocessing or contextual steps.
Would you like me to help refine a prompt or suggest specific tools to complement your implementation? Let me know!

File diff suppressed because it is too large Load Diff

View File

@@ -2,7 +2,7 @@ module llmfunction
export virtualWineUserChatbox, jsoncorrection, checkinventory, # recommendbox, export virtualWineUserChatbox, jsoncorrection, checkinventory, # recommendbox,
virtualWineUserRecommendbox, userChatbox, userRecommendbox, extractWineAttributes_1, virtualWineUserRecommendbox, userChatbox, userRecommendbox, extractWineAttributes_1,
extractWineAttributes_2 extractWineAttributes_2, paraphrase
using HTTP, JSON3, URIs, Random, PrettyPrinting, UUIDs, Dates using HTTP, JSON3, URIs, Random, PrettyPrinting, UUIDs, Dates
using GeneralUtils, SQLLLM using GeneralUtils, SQLLLM
@@ -291,24 +291,23 @@ julia> result = checkinventory(agent, input)
function checkinventory(a::T1, input::T2 function checkinventory(a::T1, input::T2
) where {T1<:agent, T2<:AbstractString} ) where {T1<:agent, T2<:AbstractString}
println("\n~~~ checkinventory order: $input ", Dates.now(), " ", @__FILE__, " ", @__LINE__) println("\ncheckinventory order: $input ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
wineattributes_1 = extractWineAttributes_1(a, input) wineattributes_1 = extractWineAttributes_1(a, input)
wineattributes_2 = extractWineAttributes_2(a, input) wineattributes_2 = extractWineAttributes_2(a, input)
_inventoryquery = "retailer name: $(a.retailername), $wineattributes_1, $wineattributes_2" _inventoryquery = "retailer name: $(a.retailername), $wineattributes_1, $wineattributes_2"
inventoryquery = "Retrieves winery, wine_name, vintage, region, country, wine_type, grape, serving_temperature, sweetness, intensity, tannin, acidity, tasting_notes, price and currency of wines that match the following criteria - {$_inventoryquery}" inventoryquery = "Retrieves winery, wine_name, wine_id, vintage, region, country, wine_type, grape, serving_temperature, sweetness, intensity, tannin, acidity, tasting_notes, price and currency of wines that match the following criteria - {$_inventoryquery}"
println("~~~ checkinventory input: $inventoryquery ", Dates.now(), " ", @__FILE__, " ", @__LINE__) println("\ncheckinventory input: $inventoryquery ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
# add suppport for similarSQLVectorDB # add suppport for similarSQLVectorDB
textresult, rawresponse = SQLLLM.query(inventoryquery, a.func[:executeSQL], textresult, rawresponse = SQLLLM.query(inventoryquery, a.func[:executeSQL],
a.func[:text2textInstructLLM], a.func[:text2textInstructLLM];
insertSQLVectorDB=a.func[:insertSQLVectorDB], insertSQLVectorDB=a.func[:insertSQLVectorDB],
similarSQLVectorDB=a.func[:similarSQLVectorDB]) similarSQLVectorDB=a.func[:similarSQLVectorDB],
llmFormatName="qwen3")
println("\n~~~ checkinventory result ", Dates.now(), " ", @__FILE__, " ", @__LINE__) println("\ncheckinventory result ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
println(textresult) println(textresult)
#[WORKING] when rawresponse is nothing, AI get errors
return (result=textresult, rawresponse=rawresponse, success=true, errormsg=nothing) return (result=textresult, rawresponse=rawresponse, success=true, errormsg=nothing)
end end
@@ -328,60 +327,83 @@ julia>
# TODO # TODO
- [] update docstring - [] update docstring
- [x] implement the function - implement the function
# Signature # Signature
""" """
function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<:AbstractString} function extractWineAttributes_1(a::T1, input::T2; maxattempt=10
)::String where {T1<:agent, T2<:AbstractString}
systemmsg = systemmsg =
""" """
As a helpful sommelier, your task is to extract the user information from the user's query as much as possible to fill out user's preference form. As a helpful sommelier, your task is to extract the user information from the user's query as much as possible to fill out user's preference form.
At each round of conversation, the user will give you the current situation: At each round of conversation, the user will give you the following:
User's query: ... User's query: ...
You must follow the following guidelines: You must follow the following guidelines:
1) If specific information required in the preference form is not available in the query or there isn't any, mark with "NA" to indicate this. - If specific information required in the preference form is not available in the query or there isn't any, mark with "NA" to indicate this.
Additionally, words like 'any' or 'unlimited' mean no information is available. Additionally, words like 'any' or 'unlimited' mean no information is available.
2) Do not generate other comments. - Do not generate other comments.
You should then respond to the user with the following points: You should then respond to the user with:
- reasoning: state your understanding of the current situation Thought: state your understanding of the current situation
- wine_name: name of the wine Wine_name: name of the wine
- winery: name of the winery Winery: name of the winery
- vintage: the year of the wine Vintage: the year of the wine
- region: a region (NOT a country) where the wine is produced, such as Burgundy, Napa Valley, etc Region: a region (NOT a country) where the wine is produced, such as Burgundy, Napa Valley, etc
- country: a country where the wine is produced. Can be "Austria", "Australia", "France", "Germany", "Italy", "Portugal", "Spain", "United States" Country: a country where the wine is produced. Can be "Austria", "Australia", "France", "Germany", "Italy", "Portugal", "Spain", "United States"
- wine_type: can be one of: "red", "white", "sparkling", "rose", "dessert" or "fortified" Wine_type: can be one of: "red", "white", "sparkling", "rose", "dessert" or "fortified"
- grape_varietal: the name of the primary grape used to make the wine Grape_varietal: the name of the primary grape used to make the wine
- tasting_notes: a brief description of the wine's taste, such as "butter", "oak", "fruity", etc Tasting_notes: a brief description of the wine's taste, such as "butter", "oak", "fruity", etc
- wine_price: price range of wine. Wine_price: price range of wine.
- occasion: the occasion the user is having the wine for Occasion: the occasion the user is having the wine for
- food_to_be_paired_with_wine: food that the user will be served with the wine such as poultry, fish, steak, etc Food_to_be_paired_with_wine: food that the user will be served with the wine such as poultry, fish, steak, etc
You should only respond in the user's preference form (JSON) as described below: You should only respond in format as described below:
{"reasoning": ..., "winery": ..., "wine_name": ..., "vintage": ..., "region": ..., "country": ..., "wine_type": ..., "grape_varietal": ..., "tasting_notes": ..., "wine_price": ..., "occasion": ..., "food_to_be_paired_with_wine": ...} Thought: ...
Wine_name: ...
Here are some example: Winery: ...
User's query: red, Chenin Blanc, Riesling, 20 USD Vintage: ...
{"reasoning": ..., "winery": "NA", "wine_name": "NA", "vintage": "NA", "region": "NA", "country": "NA", "wine_type": "red, white", "grape_varietal": "Chenin Blanc, Riesling", "tasting_notes": "NA", "wine_price": "0-20", "occasion": "NA", "food_to_be_paired_with_wine": "NA"} Region: ...
Country: ...
Wine_type:
Grape_varietal: ...
Tasting_notes: ...
Wine_price: ...
Occasion: ...
Food_to_be_paired_with_wine: ...
Here are some example:
User's query: red, Chenin Blanc, Riesling, 20 USD
{"reasoning": ..., "winery": "NA", "wine_name": "NA", "vintage": "NA", "region": "NA", "country": "NA", "wine_type": "red, white", "grape_varietal": "Chenin Blanc, Riesling", "tasting_notes": "NA", "wine_price": "0-20", "occasion": "NA", "food_to_be_paired_with_wine": "NA"}
User's query: Domaine du Collier Saumur Blanc 2019, France, white, Chenin Blanc User's query: Domaine du Collier Saumur Blanc 2019, France, white, Merlot
{"reasoning": ..., "winery": "Domaine du Collier", "wine_name": "Saumur Blanc", "vintage": "2019", "region": "Saumur", "country": "France", "wine_type": "white", "grape_varietal": "Chenin Blanc", "tasting_notes": "NA", "wine_price": "NA", "occasion": "NA", "food_to_be_paired_with_wine": "NA"} {"reasoning": ..., "winery": "Domaine du Collier", "wine_name": "Saumur Blanc", "vintage": "2019", "region": "Saumur", "country": "France", "wine_type": "white", "grape_varietal": "Merlot", "tasting_notes": "NA", "wine_price": "NA", "occasion": "NA", "food_to_be_paired_with_wine": "NA"}
Let's begin! Let's begin!
""" """
attributes = ["reasoning", "winery", "wine_name", "vintage", "region", "country", "wine_type", "grape_varietal", "tasting_notes", "wine_price", "occasion", "food_to_be_paired_with_wine"] header = ["Thought:", "Wine_name:", "Winery:", "Vintage:", "Region:", "Country:", "Wine_type:", "Grape_varietal:", "Tasting_notes:", "Wine_price:", "Occasion:", "Food_to_be_paired_with_wine:"]
errornote = "" dictkey = ["thought", "wine_name", "winery", "vintage", "region", "country", "wine_type", "grape_varietal", "tasting_notes", "wine_price", "occasion", "food_to_be_paired_with_wine"]
errornote = "N/A"
for attempt in 1:5 llmkwargs=Dict(
:num_ctx => 32768,
:temperature => 0.5,
)
for attempt in 1:maxattempt
#[PENDING] I should add generatequestion()
if attempt > 1
println("\nYiemAgent extractWineAttributes_1() attempt $attempt/$maxattempt ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
end
usermsg = usermsg =
""" """
User's query: $input User's query: $input
$errornote P.S. $errornote
""" """
_prompt = _prompt =
@@ -391,32 +413,39 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
] ]
# put in model format # put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct") prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
prompt *= response = a.func[:text2textInstructLLM](prompt;
""" modelsize="medium", llmkwargs=llmkwargs, senderId=a.id)
<|start_header_id|>assistant<|end_header_id|>
"""
response = a.func[:text2textInstructLLM](prompt)
response = GeneralUtils.remove_french_accents(response) response = GeneralUtils.remove_french_accents(response)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
# check wheter all attributes are in the response # check wheter all attributes are in the response
checkFlag = false checkFlag = false
for word in attributes for word in header
if !occursin(word, response) if !occursin(word, response)
errornote = "$word attribute is missing in previous attempts" errornote = "In your previous attempts, the $word attribute is missing. Please try again."
println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__) println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
checkFlag = true checkFlag = true
break break
end end
end end
checkFlag == true ? continue : nothing checkFlag == true ? continue : nothing
# check whether response has all answer's key points
detected_kw = GeneralUtils.detect_keyword(header, response)
if 0 values(detected_kw)
errornote = "In your previous attempts, the response does not have all answer's key points"
println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
elseif sum(values(detected_kw)) > length(header)
errornote = "In your previous attempts, the response has duplicated answer's key points"
println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
responsedict = GeneralUtils.textToDict(response, header;
dictKey=dictkey, symbolkey=true)
responsedict = copy(JSON3.read(response)) delete!(responsedict, :thought)
# convert
delete!(responsedict, :reasoning)
delete!(responsedict, :tasting_notes) delete!(responsedict, :tasting_notes)
delete!(responsedict, :occasion) delete!(responsedict, :occasion)
delete!(responsedict, :food_to_be_paired_with_wine) delete!(responsedict, :food_to_be_paired_with_wine)
@@ -426,25 +455,34 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
# check if winery, wine_name, region, country, wine_type, grape_varietal's value are in the query because sometime AI halucinates # check if winery, wine_name, region, country, wine_type, grape_varietal's value are in the query because sometime AI halucinates
checkFlag = false checkFlag = false
for i in attributes for i in dictkey
j = Symbol(i) j = Symbol(i)
if j [:reasoning, :tasting_notes, :occasion, :food_to_be_paired_with_wine] if j [:thought, :tasting_notes, :occasion, :food_to_be_paired_with_wine]
# in case j is wine_price it needs to be checked differently because its value is ranged # in case j is wine_price it needs to be checked differently because its value is ranged
if j == :wine_price if j == :wine_price
if responsedict[:wine_price] != "NA" if responsedict[:wine_price] != "NA"
# check whether wine_price is in ranged number # check whether wine_price is in ranged number
if !occursin('-', responsedict[:wine_price]) if !occursin('-', responsedict[:wine_price])
errornote = "wine_price must be a range number" errornote = "In your previous attempt, the 'wine_price' was not set to a ranged number. Please adjust it accordingly."
println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__) println("\nERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
checkFlag = true checkFlag = true
break break
end end
# check whether max wine_price is in the input # check whether max wine_price is in the input
maxprice = split(responsedict[:wine_price], '-')[end] pricerange = split(responsedict[:wine_price], '-')
minprice = pricerange[1]
maxprice = pricerange[end]
if !occursin(maxprice, input) if !occursin(maxprice, input)
responsedict[:wine_price] = "NA" responsedict[:wine_price] = "NA"
end end
# price range like 100-100 is not good
if minprice == maxprice
errornote = "In your previous attempt, you inputted 'wine_price' with a 'minimum' value equaling the 'maximum', which is not valid."
println("\nERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
checkFlag = true
break
end
end end
else else
content = responsedict[j] content = responsedict[j]
@@ -457,18 +495,18 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
content = [content] content = [content]
end end
for x in content #BUG why x is "0-1500" # for x in content #check whether price are mentioned in the input
if !occursin("NA", responsedict[j]) && !occursin(x, input) # if !occursin("NA", responsedict[j]) && !occursin(x, input)
errornote = "$x is not mentioned in the user query, you must only use the info from the query." # errornote = "$x is not mentioned in the user query, you must only use the info from the query."
println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__) # println("ERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
checkFlag == true # checkFlag == true
break # break
end # end
end # end
end end
end end
end end
checkFlag == true ? continue : nothing checkFlag == true ? continue : nothing # skip the rest code if true
# remove (some text) # remove (some text)
for (k, v) in responsedict for (k, v) in responsedict
@@ -502,7 +540,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
conversiontable = conversiontable =
""" """
Conversion Table: <Conversion Table>
Intensity level: Intensity level:
1 to 2: May correspond to "light-bodied" or a similar description. 1 to 2: May correspond to "light-bodied" or a similar description.
2 to 3: May correspond to "med light bodied", "medium light" or a similar description. 2 to 3: May correspond to "med light bodied", "medium light" or a similar description.
@@ -527,6 +565,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
3 to 4: May correspond to "medium acidity" or a similar description. 3 to 4: May correspond to "medium acidity" or a similar description.
4 to 5: May correspond to "semi high acidity" or a similar description. 4 to 5: May correspond to "semi high acidity" or a similar description.
4 to 5: May correspond to "high acidity" or a similar description. 4 to 5: May correspond to "high acidity" or a similar description.
</Conversion Table>
""" """
systemmsg = systemmsg =
@@ -545,67 +584,58 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
Additionally, words like 'any' or 'unlimited' mean no information is available. Additionally, words like 'any' or 'unlimited' mean no information is available.
2) Use the conversion table to convert the descriptive word level of sweetness, intensity, tannin, and acidity into a corresponding integer. 2) Use the conversion table to convert the descriptive word level of sweetness, intensity, tannin, and acidity into a corresponding integer.
3) Do not generate other comments. 3) Do not generate other comments.
You should then respond to the user with:
You should then respond to the user with the following points: Sweetness_keyword: The exact keywords in the user's query describing the sweetness level of the wine.
- sweetness_keyword: The exact keywords in the user's query describing the sweetness level of the wine. Sweetness: ( S ), where ( S ) represents integers indicating the range of sweetness levels. Example: 1-2
- sweetness: ( S ), where ( S ) represents integers indicating the range of sweetness levels. Example: 1-2 Acidity_keyword: The exact keywords in the user's query describing the acidity level of the wine.
- acidity_keyword: The exact keywords in the user's query describing the acidity level of the wine. Acidity: ( A ), where ( A ) represents integers indicating the range of acidity level. Example: 3-5
- acidity: ( A ), where ( A ) represents integers indicating the range of acidity level. Example: 3-5 Tannin_keyword: The exact keywords in the user's query describing the tannin level of the wine.
- tannin_keyword: The exact keywords in the user's query describing the tannin level of the wine. Tannin: ( T ), where ( T ) represents integers indicating the range of tannin level. Example: 1-3
- tannin: ( T ), where ( T ) represents integers indicating the range of tannin level. Example: 1-3 Intensity_keyword: The exact keywords in the user's query describing the intensity level of the wine.
- intensity_keyword: The exact keywords in the user's query describing the intensity level of the wine. Intensity: ( I ), where ( I ) represents integers indicating the range of intensity level. Example: 2-4
- intensity: ( I ), where ( I ) represents integers indicating the range of intensity level. Example: 2-4 You should only respond in format as described below:
Sweetness_keyword: ...
You should only respond in the form (JSON) as described below: Sweetness: ...
{ Acidity_keyword: ...
"sweetness_keyword": ..., Acidity: ...
"sweetness": ..., Tannin_keyword: ...
"acidity_keyword": ..., Tannin: ...
"acidity": ..., Intensity_keyword: ...
"tannin_keyword": ..., Intensity: ...
"tannin": ...,
"intensity_keyword": ...,
"intensity": ...
}
Here are some examples: Here are some examples:
User's query: I want a wine with a medium-bodied, low acidity, medium tannin. User's query: I want a wine with a medium-bodied, low acidity, medium tannin.
{ Sweetness_keyword: NA
"sweetness_keyword": "NA", Sweetness: NA
"sweetness": "NA", Acidity_keyword: low acidity
"acidity_keyword": "low acidity", Acidity: 1-2
"acidity": "1-2", Tannin_keyword: medium tannin
"tannin_keyword": "medium tannin", Tannin: 3-4
"tannin": "3-4", Intensity_keyword: medium-bodied
"intensity_keyword": "medium-bodied", Intensity: 3-4
"intensity": "3-4"
}
User's query: German red wine, under 100, pairs with spicy food
{
"sweetness_keyword": "NA",
"sweetness": "NA",
"acidity_keyword": "NA",
"acidity": "NA",
"tannin_keyword": "NA",
"tannin": "NA",
"intensity_keyword": "NA",
"intensity": "NA"
}
User's query: German red wine, under 100, pairs with spicy food
Sweetness_keyword: NA
Sweetness: NA
Acidity_keyword: NA
Acidity: NA
Tannin_keyword: NA
Tannin: NA
Intensity_keyword: NA
Intensity: NA
Let's begin! Let's begin!
""" """
header = ["Sweetness_keyword:", "Sweetness:", "Acidity_keyword:", "Acidity:", "Tannin_keyword:", "Tannin:", "Intensity_keyword:", "Intensity:"]
dictkey = ["sweetness_keyword", "sweetness", "acidity_keyword", "acidity", "tannin_keyword", "tannin", "intensity_keyword", "intensity"]
errornote = "" errornote = ""
for attempt in 1:5 for attempt in 1:10
usermsg = usermsg =
""" """
$conversiontable $conversiontable
User's query: $input User's query: $input
$errornote P.S. $errornote
""" """
_prompt = _prompt =
@@ -615,22 +645,33 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
] ]
# put in model format # put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct") prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
prompt *=
"""
<|start_header_id|>assistant<|end_header_id|>
"""
response = a.func[:text2textInstructLLM](prompt) response = a.func[:text2textInstructLLM](prompt)
responsedict = copy(JSON3.read(response)) response = GeneralUtils.deFormatLLMtext(response, "granite3")
# check whether response has all answer's key points
detected_kw = GeneralUtils.detect_keyword(header, response)
if 0 values(detected_kw)
errornote = "In your previous attempt does not have all answer's key points"
println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
elseif sum(values(detected_kw)) > length(header)
errornote = "In your previous attempt has duplicated answer's key points"
println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
responsedict = GeneralUtils.textToDict(response, header;
dictKey=dictkey, symbolkey=true)
# check whether each describing keyword is in the input to prevent halucination # check whether each describing keyword is in the input to prevent halucination
for i in ["sweetness", "acidity", "tannin", "intensity"] for i in ["sweetness", "acidity", "tannin", "intensity"]
keyword = Symbol(i * "_keyword") # e.g. sweetness_keyword keyword = Symbol(i * "_keyword") # e.g. sweetness_keyword
value = responsedict[keyword] value = responsedict[keyword]
if value != "NA" && !occursin(value, input) if value != "NA" && !occursin(value, input)
errornote = "WARNING. Keyword $keyword: $value does not appear in the input. You must use information from the input only" errornote = "In your previous attempt, keyword $keyword: $value does not appear in the input. You must use information from the input only"
println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__) println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue continue
end end
@@ -646,7 +687,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
if !occursin("keyword", string(k)) if !occursin("keyword", string(k))
if v !== "NA" && (!occursin('-', v) || length(v) > 5) if v !== "NA" && (!occursin('-', v) || length(v) > 5)
errornote = "WARNING: The non-range value {$k: $v} is not allowed. It should be specified in a range format, i.e. min-max." errornote = "WARNING: The non-range value {$k: $v} is not allowed. It should be specified in a range format, i.e. min-max."
println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__) println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue continue
end end
end end
@@ -675,168 +716,122 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
end end
# function concept(a::sommelier, thoughtDict) function paraphrase(text2textInstructLLM::Function, text::String)
# systemmsg = systemmsg =
# """ """
# Your name: N/A Your name: N/A
# Situation: Your vision:
# - You are a helpful assistant - You are a helpful assistant who help the user to paraphrase their text.
# Your vision: Your mission:
# - This is a good opportunity to help the user - To help paraphrase the user's text
# Your mission: Mission's objective includes:
# - To describe the concept of a conversation - To help paraphrase the user's text
# Mission's objective includes: Your responsibility includes:
# - To 1) To help paraphrase the user's text
# Your responsibility includes: Your responsibility does NOT includes:
# 1) Given the situation, convey your thoughts to the user. 1) N/A
# Your responsibility excludes: Your profile:
# 1) Asking or guiding the user to make a purchase - N/A
# 2) Processing sales orders or engaging in any other sales-related activities Additional information:
# 3) Answering questions and offering additional services beyond just recommendations, such as delivery, box, gift wrapping, personalized messages. Customers can reach out to our sales at the store. - N/A
# Your profile:
# - You are a young professional in a big company.
# - You are avid party goer
# - You like beer.
# - You know nothing about wine.
# - You have a budget of 1500usd.
# Additional information:
# - your boss like spicy food.
# - your boss is a middle-aged man.
# At each round of conversation, you will be given the following information: At each round of conversation, you will be given the following information:
# Your ongoing conversation with the user: ... Text: The user's given text
# Context: ...
# Your thoughts: Your current thoughts in your mind
# You MUST follow the following guidelines: You MUST follow the following guidelines:
# - Do not offer additional services you didn't thought. - N/A
# You should follow the following guidelines: You should follow the following guidelines:
# - Focus on the latest conversation. - N/A
# - If the user interrupts, prioritize the user
# - Medium and full-bodied red wines should not be paired with spicy foods.
# You should then respond to the user with: You should then respond to the user with:
# 1) Chat: Given the situation, How would you respond to the user to express your thoughts honestly and keep the conversation going smoothly? Paraphrase: Paraphrased text
# You should only respond in format as described below: You should only respond in format as described below:
# Chat: ... Paraphrase: ...
# Here are some examples of response format: Let's begin!
# Chat: "I see. Let me think about it. I'll get back to you with my recommendation." """
# Let's begin! header = ["Paraphrase:"]
# """ dictkey = ["paraphrase"]
# # a.memory[:shortmem][:available_wine] is a dataframe. errornote = ""
# context = response = nothing # placeholder for show when error msg show up
# if haskey(a.memory[:shortmem], :available_wine)
# "Available wines $(GeneralUtils.dfToString(a.memory[:shortmem][:available_wine]))"
# else
# "None"
# end
# chathistory = vectorOfDictToText(a.chathistory)
# errornote = ""
# response = nothing # placeholder for show when error msg show up
# for attempt in 1:10 for attempt in 1:10
# usermsg = """ usermsg = """
# Your ongoing conversation with the user: $chathistory Text: $text
# Contex: $context P.S. $errornote
# Your thoughts: $(thoughtDict[:understanding]) $(thoughtDict[:reasoning]) $(thoughtDict[:plan]) """
# $errornote
# """
# _prompt = _prompt =
# [ [
# Dict(:name => "system", :text => systemmsg), Dict(:name => "system", :text => systemmsg),
# Dict(:name => "user", :text => usermsg) Dict(:name => "user", :text => usermsg)
# ] ]
# # put in model format # put in model format
# prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct") prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
# prompt *= """
# <|start_header_id|>assistant<|end_header_id|>
# """
# try try
# response = a.func[:text2textInstructLLM](prompt) response = text2textInstructLLM(prompt)
# # sometime the model response like this "here's how I would respond: ..." response = GeneralUtils.deFormatLLMtext(response, "granite3")
# if occursin("respond:", response) # sometime the model response like this "here's how I would respond: ..."
# errornote = "You don't need to intro your response" if occursin("respond:", response)
# error("generatechat() response contain : ", Dates.now(), " ", @__FILE__, " ", @__LINE__) errornote = "You don't need to intro your response"
# end error("\nparaphrase() response contain : ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
# response = GeneralUtils.remove_french_accents(response) end
# response = replace(response, '*'=>"") response = GeneralUtils.remove_french_accents(response)
# response = replace(response, '$' => "USD") response = replace(response, '*'=>"")
# response = replace(response, '`' => "") response = replace(response, '$' => "USD")
# response = GeneralUtils.remove_french_accents(response) response = replace(response, '`' => "")
# responsedict = GeneralUtils.textToDict(response, ["Chat"], response = GeneralUtils.remove_french_accents(response)
# rightmarker=":", symbolkey=true, lowercasekey=true)
# for i ∈ [:chat] # check whether response has all answer's key points
# if length(JSON3.write(responsedict[i])) == 0 detected_kw = GeneralUtils.detect_keyword(header, response)
# error("$i is empty ", Dates.now(), " ", @__FILE__, " ", @__LINE__) if 0 values(detected_kw)
# end errornote = "\nYiemAgent paraphrase() response does not have all answer's key points"
# end continue
elseif sum(values(detected_kw)) > length(header)
errornote = "\nnYiemAgent paraphrase() response has duplicated answer's key points"
continue
end
# # check if there are more than 1 key per categories responsedict = GeneralUtils.textToDict(response, header;
# for i ∈ [:chat] dictKey=dictkey, symbolkey=true)
# matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
# if length(matchkeys) > 1
# error("generatechat has more than one key per categories")
# end
# end
# # check if Context: is in chat for i [:paraphrase]
# if occursin("Context:", responsedict[:chat]) if length(JSON3.write(responsedict[i])) == 0
# error("Context: is in text. This is not allowed") error("$i is empty ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
# end end
end
# println("\n~~~ generatechat() ", Dates.now(), " ", @__FILE__, " ", @__LINE__) # check if there are more than 1 key per categories
# pprintln(Dict(responsedict)) for i [:paraphrase]
matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
if length(matchkeys) > 1
error("paraphrase() has more than one key per categories")
end
end
# # check whether an agent recommend wines before checking inventory or recommend wines println("\nparaphrase() ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
# # outside its inventory pprintln(Dict(responsedict))
# # ask LLM whether there are any winery mentioned in the response
# mentioned_winery = detectWineryName(a, responsedict[:chat])
# if mentioned_winery != "None"
# mentioned_winery = String.(strip.(split(mentioned_winery, ",")))
# # check whether the wine is in event result = responsedict[:paraphrase]
# isWineInEvent = false
# for winename in mentioned_winery
# for event in a.memory[:events]
# if event[:outcome] !== nothing && occursin(winename, event[:outcome])
# isWineInEvent = true
# break
# end
# end
# end
# # if wine is mentioned but not in timeline or shortmem, return result
# # then the agent is not supposed to recommend the wine catch e
# if isWineInEvent == false io = IOBuffer()
showerror(io, e)
# errornote = "Previously: You recommend a wine that is not in your inventory which is not allowed." errorMsg = String(take!(io))
# error("Previously: You recommend a wine that is not in your inventory which is not allowed.") st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# end println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
# end end
end
# result = responsedict[:chat] error("paraphrase() failed to generate a response")
end
# return result
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
# end
# end
# error("generatechat failed to generate a response")
# end
@@ -998,7 +993,7 @@ end
# ] # ]
# # put in model format # # put in model format
# prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct") # prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
# prompt *= # prompt *=
# """ # """
# <|start_header_id|>assistant<|end_header_id|> # <|start_header_id|>assistant<|end_header_id|>
@@ -1030,7 +1025,7 @@ end
# state[:isterminal] = true # state[:isterminal] = true
# state[:reward] = 1 # state[:reward] = 1
# end # end
# println("--> 5 Evaluator ", Dates.now(), " ", @__FILE__, " ", @__LINE__) # println("--> 5 Evaluator ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
# pprintln(Dict(responsedict)) # pprintln(Dict(responsedict))
# return responsedict[:score] # return responsedict[:score]
# catch e # catch e

View File

@@ -9,11 +9,48 @@ using GeneralUtils
abstract type agent end abstract type agent end
mutable struct companion <: agent mutable struct companion <: agent
name::String # agent name
id::String # agent id id::String # agent id
systemmsg::Union{String, Nothing} systemmsg::String # system message
tools::Dict # tools
maxHistoryMsg::Integer # e.g. 21th and earlier messages will get summarized maxHistoryMsg::Integer # e.g. 21th and earlier messages will get summarized
chathistory::Vector{Dict{Symbol, Any}}
memory::Dict{Symbol, Any}
func::NamedTuple # NamedTuple of functions
llmFormatName::String
end
function companion(
func::NamedTuple # NamedTuple of functions
;
systemmsg::Union{String, Nothing}= nothing,
name::String= "Assistant",
id::String= GeneralUtils.uuid4snakecase(),
maxHistoryMsg::Integer= 20,
chathistory::Vector{Dict{Symbol, String}} = Vector{Dict{Symbol, String}}(),
llmFormatName::String= "granite3"
)
if systemmsg === nothing
systemmsg =
"""
Your name: $name
Your sex: Female
Your role: You are a helpful assistant.
You should follow the following guidelines:
- Focus on the latest conversation.
- Your like to be short and concise.
Let's begin!
"""
end
tools = Dict( # update input format
"CHATBOX"=> Dict(
:description => "- CHATBOX which you can use to talk with the user. The input is your intentions for the dialogue. Be specific.",
),
)
""" Memory """ Memory
Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3 Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
@@ -22,45 +59,31 @@ mutable struct companion <: agent
Dict(:name=>"user", :text=> "Wassup!", :timestamp=> Dates.now()), Dict(:name=>"user", :text=> "Wassup!", :timestamp=> Dates.now()),
Dict(:name=>"assistant", :text=> "Hi I'm your assistant.", :timestamp=> Dates.now()), Dict(:name=>"assistant", :text=> "Hi I'm your assistant.", :timestamp=> Dates.now()),
] ]
""" """
chathistory::Vector{Dict{Symbol, Any}}
memory::Dict{Symbol, Any}
# communication function
text2textInstructLLM::Function
end
function companion(
text2textInstructLLM::Function
;
id::String= string(uuid4()),
systemmsg::Union{String, Nothing}= nothing,
maxHistoryMsg::Integer= 20,
chathistory::Vector{Dict{Symbol, String}} = Vector{Dict{Symbol, String}}(),
)
memory = Dict{Symbol, Any}( memory = Dict{Symbol, Any}(
:chatbox=> "", :events=> Vector{Dict{Symbol, Any}}(),
:shortmem=> OrderedDict{Symbol, Any}(), :state=> Dict{Symbol, Any}(), # state of the agent
:events=> Vector{Dict{Symbol, Any}}(), :recap=> OrderedDict{Symbol, Any}(), # recap summary of the conversation
:state=> Dict{Symbol, Any}(), )
)
newAgent = companion( newAgent = companion(
id, name,
systemmsg, id,
maxHistoryMsg, systemmsg,
chathistory, tools,
memory, maxHistoryMsg,
text2textInstructLLM chathistory,
) memory,
func,
llmFormatName
)
return newAgent return newAgent
end end
""" A sommelier agent. """ A sommelier agent.
# Arguments # Arguments
@@ -134,19 +157,10 @@ mutable struct sommelier <: agent
retailername::String retailername::String
tools::Dict tools::Dict
maxHistoryMsg::Integer # e.g. 21th and earlier messages will get summarized maxHistoryMsg::Integer # e.g. 21th and earlier messages will get summarized
""" Memory
Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
NO "system" message in chathistory because I want to add it at the inference time
chathistory= [
Dict(:name=>"user", :text=> "Wassup!", :timestamp=> Dates.now()),
Dict(:name=>"assistant", :text=> "Hi I'm your assistant.", :timestamp=> Dates.now()),
]
"""
chathistory::Vector{Dict{Symbol, Any}} chathistory::Vector{Dict{Symbol, Any}}
memory::Dict{Symbol, Any} memory::Dict{Symbol, Any}
func # NamedTuple of functions func # NamedTuple of functions
llmFormatName::String
end end
function sommelier( function sommelier(
@@ -157,6 +171,7 @@ function sommelier(
retailername::String= "retailer_name", retailername::String= "retailer_name",
maxHistoryMsg::Integer= 20, maxHistoryMsg::Integer= 20,
chathistory::Vector{Dict{Symbol, String}} = Vector{Dict{Symbol, String}}(), chathistory::Vector{Dict{Symbol, String}} = Vector{Dict{Symbol, String}}(),
llmFormatName::String= "granite3"
) )
tools = Dict( # update input format tools = Dict( # update input format
@@ -170,16 +185,17 @@ function sommelier(
:input => """<input>Input is a JSON-formatted string that contains a detailed and precise search query.</input><input example>{\"wine type\": \"rose\", \"price\": \"max 35\", \"sweetness level\": \"sweet\", \"intensity level\": \"light bodied\", \"Tannin level\": \"low\", \"Acidity level\": \"low\"}</input example>""", :input => """<input>Input is a JSON-formatted string that contains a detailed and precise search query.</input><input example>{\"wine type\": \"rose\", \"price\": \"max 35\", \"sweetness level\": \"sweet\", \"intensity level\": \"light bodied\", \"Tannin level\": \"low\", \"Acidity level\": \"low\"}</input example>""",
:output => """<output>Output are wines that match the search query in JSON format.""", :output => """<output>Output are wines that match the search query in JSON format.""",
), ),
# "finalanswer"=> Dict(
# :description => "<tool description>Useful for when you are ready to recommend wines to the user.</tool description>",
# :input => """<input format>{\"finalanswer\": \"some text\"}.</input format><input example>{\"finalanswer\": \"I recommend Zena Crown Vista\"}</input example>""",
# :output => "" ,
# :func => nothing,
# ),
) )
""" Memory
Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
NO "system" message in chathistory because I want to add it at the inference time
chathistory= [
Dict(:name=>"user", :text=> "Wassup!", :timestamp=> Dates.now()),
Dict(:name=>"assistant", :text=> "Hi I'm your assistant.", :timestamp=> Dates.now()),
]
"""
memory = Dict{Symbol, Any}( memory = Dict{Symbol, Any}(
:chatbox=> "",
:shortmem=> OrderedDict{Symbol, Any}( :shortmem=> OrderedDict{Symbol, Any}(
:available_wine=> [], :available_wine=> [],
:found_wine=> [], # used by decisionMaker(). This is to prevent decisionMaker() keep presenting the same wines :found_wine=> [], # used by decisionMaker(). This is to prevent decisionMaker() keep presenting the same wines
@@ -198,7 +214,8 @@ function sommelier(
maxHistoryMsg, maxHistoryMsg,
chathistory, chathistory,
memory, memory,
func func,
llmFormatName
) )
return newAgent return newAgent

View File

@@ -107,7 +107,7 @@ function addNewMessage(a::T1, name::String, text::T2;
error("name is not in agent.availableRole $(@__LINE__)") error("name is not in agent.availableRole $(@__LINE__)")
end end
#[WORKING] summarize the oldest 10 message #[PENDING] summarize the oldest 10 message
if length(a.chathistory) > maximumMsg if length(a.chathistory) > maximumMsg
summarize(a.chathistory) summarize(a.chathistory)
else else
@@ -122,47 +122,53 @@ This function takes in a vector of dictionaries and outputs a single string wher
# Arguments # Arguments
- `vecd::Vector` - `vecd::Vector`
a vector of dictionaries A vector of dictionaries containing chat messages
- `withkey::Bool` - `withkey::Bool`
whether to include the key in the output text. Default is true Whether to include the name as a prefix in the output text. Default is true
- `range::Union{Nothing,UnitRange,Int}`
Optional range of messages to include. If nothing, includes all messages
# Return # Returns
a string with the formatted dictionaries A formatted string where each line contains either:
- If withkey=true: "name> message\n"
- If withkey=false: "message\n"
# Example # Example
```jldoctest
julia> using Revise julia> using Revise
julia> using GeneralUtils julia> using GeneralUtils
julia> vecd = [Dict(:name => "John", :text => "Hello"), Dict(:name => "Jane", :text => "Goodbye")] julia> vecd = [Dict(:name => "John", :text => "Hello"), Dict(:name => "Jane", :text => "Goodbye")]
julia> GeneralUtils.vectorOfDictToText(vecd, withkey=true) julia> GeneralUtils.vectorOfDictToText(vecd, withkey=true)
"John> Hello\nJane> Goodbye\n" "John> Hello\nJane> Goodbye\n"
``` ```
# Signature
""" """
function chatHistoryToText(vecd::Vector; withkey=true)::String function chatHistoryToText(vecd::Vector; withkey=true, range=nothing)::String
# Initialize an empty string to hold the final text # Initialize an empty string to hold the final text
text = "" text = ""
# Get the elements within the specified range, or all elements if no range provided
elements = isnothing(range) ? vecd : vecd[range]
# Determine whether to include the key in the output text or not # Determine whether to include the key in the output text or not
if withkey if withkey
# Loop through each dictionary in the input vector # Loop through each dictionary in the input vector
for d in vecd for d in elements
# Extract the 'name' and 'text' keys from the dictionary # Extract the 'name' and 'text' keys from the dictionary
name = d[:name] name = titlecase(d[:name])
_text = d[:text] _text = d[:text]
# Append the formatted string to the text variable # Append the formatted string to the text variable
text *= "$name> $_text \n" text *= "$name> $_text \n"
end end
else else
# Loop through each dictionary in the input vector # Loop through each dictionary in the input vector
for d in vecd for d in elements
# Iterate over all key-value pairs in the dictionary # Iterate over all key-value pairs in the dictionary
for (k, v) in d for (k, v) in d
# Append the formatted string to the text variable # Append the formatted string to the text variable
text *= "$v \n" text *= "$v \n"
end end
end end
end end
# Return the final text # Return the final text
@@ -191,6 +197,35 @@ end
""" Create a dictionary representing an event with optional details.
# Arguments
- `event_description::Union{String, Nothing}`
A description of the event
- `timestamp::Union{DateTime, Nothing}`
The time when the event occurred
- `subject::Union{String, Nothing}`
The subject or entity associated with the event
- `thought::Union{AbstractDict, Nothing}`
Any associated thoughts or metadata
- `actionname::Union{String, Nothing}`
The name of the action performed (e.g., "CHAT", "CHECKINVENTORY")
- `actioninput::Union{String, Nothing}`
Input or parameters for the action
- `location::Union{String, Nothing}`
Where the event took place
- `equipment_used::Union{String, Nothing}`
Equipment involved in the event
- `material_used::Union{String, Nothing}`
Materials used during the event
- `outcome::Union{String, Nothing}`
The result or consequence of the event after action execution
- `note::Union{String, Nothing}`
Additional notes or comments
# Returns
A dictionary with event details as symbol-keyed key-value pairs
"""
function eventdict(; function eventdict(;
event_description::Union{String, Nothing}=nothing, event_description::Union{String, Nothing}=nothing,
timestamp::Union{DateTime, Nothing}=nothing, timestamp::Union{DateTime, Nothing}=nothing,
@@ -204,254 +239,108 @@ function eventdict(;
outcome::Union{String, Nothing}=nothing, outcome::Union{String, Nothing}=nothing,
note::Union{String, Nothing}=nothing, note::Union{String, Nothing}=nothing,
) )
return Dict{Symbol, Any}(
:event_description=> event_description, d = Dict{Symbol, Any}(
:timestamp=> timestamp, :event_description=> event_description,
:subject=> subject, :timestamp=> timestamp,
:thought=> thought, :subject=> subject,
:actionname=> actionname, :thought=> thought,
:actioninput=> actioninput, :actionname=> actionname,
:location=> location, :actioninput=> actioninput,
:equipment_used=> equipment_used, :location=> location,
:material_used=> material_used, :equipment_used=> equipment_used,
:outcome=> outcome, :material_used=> material_used,
:note=> note, :outcome=> outcome,
) :note=> note,
)
return d
end end
function createTimeline(memory::T1; skiprecent::Integer=0) where {T1<:AbstractVector} """ Create a formatted timeline string from a sequence of events.
events = memory[1:end-skiprecent]
# Arguments
- `events::T1`
Vector of event dictionaries containing subject, actioninput and optional outcome fields
Each event dictionary should have the following keys:
- :subject - The subject or entity performing the action
- :actioninput - The action or input performed by the subject
- :outcome - (Optional) The result or outcome of the action
# Returns
- `timeline::String`
A formatted string representing the events with their subjects, actions, and optional outcomes
Format: "{index}) {subject}> {actioninput} {outcome}\n" for each event
# Example
events = [
Dict(:subject => "User", :actioninput => "Hello", :outcome => nothing),
Dict(:subject => "Assistant", :actioninput => "Hi there!", :outcome => "with a smile")
]
timeline = createTimeline(events)
# 1) User> Hello
# 2) Assistant> Hi there! with a smile
"""
function createTimeline(events::T1; eventindex::Union{UnitRange, Nothing}=nothing
) where {T1<:AbstractVector}
# Initialize empty timeline string
timeline = "" timeline = ""
for (i, event) in enumerate(events)
if event[:outcome] === nothing # Determine which indices to use - either provided range or full length
timeline *= "$i) $(event[:subject])> $(event[:actioninput])\n" ind =
if eventindex !== nothing
[eventindex...]
else else
timeline *= "$i) $(event[:subject])> $(event[:actioninput]) $(event[:outcome])\n" 1:length(events)
end
# Iterate through events and format each one
for (i, event) in zip(ind, events)
# If no outcome exists, format without outcome
if event[:outcome] === nothing
timeline *= "Event_$i $(event[:subject])> $(event[:actioninput])\n"
# If outcome exists, include it in formatting
else
timeline *= "Event_$i $(event[:subject])> $(event[:actioninput]) $(event[:outcome])\n"
end end
end end
# Return formatted timeline string
return timeline return timeline
end end
# function createTimeline(events::T1; eventindex::Union{UnitRange, Nothing}=nothing
# ) where {T1<:AbstractVector}
# # Initialize empty timeline string
# timeline = ""
# # Determine which indices to use - either provided range or full length
# ind =
# if eventindex !== nothing
# [eventindex...]
# else
# 1:length(events)
# end
# # Iterate through events and format each one
# """ Convert a single chat dictionary into LLM model instruct format. # for (i, event) in zip(ind, events)
# # If no outcome exists, format without outcome
# # Llama 3 instruct format example # subject = titlecase(event[:subject])
# <|system|> # if event[:outcome] === nothing
# You are a helpful AI assistant.<|end|>
# <|user|> # timeline *= "Event_$i) Who: $subject Action_name: $(event[:actionname]) Action_input: $(event[:actioninput])\n"
# I am going to Paris, what should I see?<|end|> # # If outcome exists, include it in formatting
# <|assistant|> # else
# Paris, the capital of France, is known for its stunning architecture, art museums."<|end|> # timeline *= "Event_$i) Who: $subject Action_name: $(event[:actionname]) Action_input: $(event[:actioninput]) Action output: $(event[:outcome])\n"
# <|user|>
# What is so great about #1?<|end|>
# <|assistant|>
# # Arguments
# - `name::T`
# message owner name e.f. "system", "user" or "assistant"
# - `text::T`
# # Return
# - `formattedtext::String`
# text formatted to model format
# # Example
# ```jldoctest
# julia> using Revise
# julia> using YiemAgent
# julia> d = Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",)
# julia> formattedtext = YiemAgent.formatLLMtext_phi3instruct(d[:name], d[:text])
# ```
# Signature
# """
# function formatLLMtext_phi3instruct(name::T, text::T) where {T<:AbstractString}
# formattedtext =
# """
# <|$name|>
# $text<|end|>\n
# """
# return formattedtext
# end
# """ Convert a single chat dictionary into LLM model instruct format.
# # Llama 3 instruct format example
# <|begin_of_text|>
# <|start_header_id|>system<|end_header_id|>
# You are a helpful assistant.
# <|eot_id|>
# <|start_header_id|>user<|end_header_id|>
# Get me an icecream.
# <|eot_id|>
# <|start_header_id|>assistant<|end_header_id|>
# Go buy it yourself at 7-11.
# <|eot_id|>
# # Arguments
# - `name::T`
# message owner name e.f. "system", "user" or "assistant"
# - `text::T`
# # Return
# - `formattedtext::String`
# text formatted to model format
# # Example
# ```jldoctest
# julia> using Revise
# julia> using YiemAgent
# julia> d = Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",)
# julia> formattedtext = YiemAgent.formatLLMtext_llama3instruct(d[:name], d[:text])
# "<|begin_of_text|>\n <|start_header_id|>system<|end_header_id|>\n You are a helpful, respectful and honest assistant.\n <|eot_id|>\n"
# ```
# Signature
# """
# function formatLLMtext_llama3instruct(name::T, text::T) where {T<:AbstractString}
# formattedtext =
# if name == "system"
# """
# <|begin_of_text|>
# <|start_header_id|>$name<|end_header_id|>
# $text
# <|eot_id|>
# """
# else
# """
# <|start_header_id|>$name<|end_header_id|>
# $text
# <|eot_id|>
# """
# end
# return formattedtext
# end
# """ Convert a chat messages in vector of dictionary into LLM model instruct format.
# # Arguments
# - `messages::Vector{Dict{Symbol, T}}`
# message owner name e.f. "system", "user" or "assistant"
# - `formatname::T`
# format name to be used
# # Return
# - `formattedtext::String`
# text formatted to model format
# # Example
# ```jldoctest
# julia> using Revise
# julia> using YiemAgent
# julia> chatmessage = [
# Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",),
# Dict(:name=> "user",:text=> "list me all planets in our solar system.",),
# Dict(:name=> "assistant",:text=> "I'm sorry. I don't know. You tell me.",),
# ]
# julia> formattedtext = YiemAgent.formatLLMtext(chatmessage, "llama3instruct")
# "<|begin_of_text|>\n <|start_header_id|>system<|end_header_id|>\n You are a helpful, respectful and honest assistant.\n <|eot_id|>\n <|start_header_id|>user<|end_header_id|>\n list me all planets in our solar system.\n <|eot_id|>\n <|start_header_id|>assistant<|end_header_id|>\n I'm sorry. I don't know. You tell me.\n <|eot_id|>\n"
# ```
# # Signature
# """
# function formatLLMtext(messages::Vector{Dict{Symbol, T}},
# formatname::String="llama3instruct") where {T<:Any}
# f = if formatname == "llama3instruct"
# formatLLMtext_llama3instruct
# elseif formatname == "mistral"
# # not define yet
# elseif formatname == "phi3instruct"
# formatLLMtext_phi3instruct
# else
# error("$formatname template not define yet")
# end
# str = ""
# for t in messages
# str *= f(t[:name], t[:text])
# end
# # add <|assistant|> so that the model don't generate it and I don't need to clean it up later
# if formatname == "phi3instruct"
# str *= "<|assistant|>\n"
# end
# return str
# end
# """
# Arguments\n
# -----
# Return\n
# -----
# Example\n
# -----
# ```jldoctest
# julia>
# ```
# TODO\n
# -----
# [] update docstring
# [PENDING] implement the function
# Signature\n
# -----
# """
# function iterativeprompting(a::T, prompt::String, verification::Function) where {T<:agent}
# msgMeta = GeneralUtils.generate_msgMeta(
# a.config[:externalService][:text2textinstruct],
# senderName= "iterativeprompting",
# senderId= a.id,
# receiverName= "text2textinstruct",
# )
# outgoingMsg = Dict(
# :msgMeta=> msgMeta,
# :payload=> Dict(
# :text=> prompt,
# )
# )
# success = nothing
# result = nothing
# critique = ""
# # iteration loop
# while true
# # send prompt to LLM
# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
# error("--> iterativeprompting")
# # check for correctness and get feedback
# success, _critique = verification(response)
# if success
# result = response
# break
# else
# # add critique to prompt
# critique *= _critique * "\n"
# replace!(prompt, "Critique: ..." => "Critique: $critique")
# end # end
# end # end
# return (success=success, result=result) # # Return formatted timeline string
# return timeline
# end # end
@@ -482,11 +371,6 @@ end

41
test/Manifest.toml Normal file
View File

@@ -0,0 +1,41 @@
# This file is machine-generated - editing it directly is not advised
julia_version = "1.11.4"
manifest_format = "2.0"
project_hash = "71d91126b5a1fb1020e1098d9d492de2a4438fd2"
[[deps.Base64]]
uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
version = "1.11.0"
[[deps.InteractiveUtils]]
deps = ["Markdown"]
uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
version = "1.11.0"
[[deps.Logging]]
uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
version = "1.11.0"
[[deps.Markdown]]
deps = ["Base64"]
uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
version = "1.11.0"
[[deps.Random]]
deps = ["SHA"]
uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
version = "1.11.0"
[[deps.SHA]]
uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
version = "0.7.0"
[[deps.Serialization]]
uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
version = "1.11.0"
[[deps.Test]]
deps = ["InteractiveUtils", "Logging", "Random", "Serialization"]
uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
version = "1.11.0"

2
test/Project.toml Normal file
View File

@@ -0,0 +1,2 @@
[deps]
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

View File

@@ -27,30 +27,50 @@
"description": "agent role" "description": "agent role"
}, },
"organization": { "organization": {
"value": "yiem_hq", "value": "yiem_branch_1",
"description": "organization name" "description": "organization name"
}, },
"externalservice": { "externalservice": {
"text2textinstruct": { "loadbalancer": {
"mqtttopic": "/loadbalancer/requestingservice", "mqtttopic": "/loadbalancer/requestingservice",
"description": "text to text service with instruct LLM", "description": "text to text service with instruct LLM"
"llminfo": { },
"name": "llama3instruct" "text2textinstruct": {
} "mqtttopic": "/loadbalancer/requestingservice",
}, "description": "text to text service with instruct LLM",
"virtualWineCustomer_1": { "llminfo": {
"mqtttopic": "/virtualenvironment/winecustomer", "name": "llama3instruct"
"description": "text to text service with instruct LLM that act as wine customer", }
"llminfo": { },
"name": "llama3instruct" "virtualWineCustomer_1": {
} "mqtttopic": "/virtualenvironment/winecustomer",
}, "description": "text to text service with instruct LLM that act as wine customer",
"text2textchat": { "llminfo": {
"mqtttopic": "/loadbalancer/requestingservice", "name": "llama3instruct"
"description": "text to text service with instruct LLM", }
"llminfo": { },
"name": "llama3instruct" "text2textchat": {
} "mqtttopic": "/loadbalancer/requestingservice",
} "description": "text to text service with instruct LLM",
"llminfo": {
"name": "llama3instruct"
}
},
"wineDB" : {
"description": "A wine database connection info for LibPQ client",
"host": "192.168.88.12",
"port": 10201,
"dbname": "wineDB",
"user": "yiemtechnologies",
"password": "yiemtechnologies@Postgres_0.0"
},
"SQLVectorDB" : {
"description": "A wine database connection info for LibPQ client",
"host": "192.168.88.12",
"port": 10203,
"dbname": "SQLVectorDB",
"user": "yiemtechnologies",
"password": "yiemtechnologies@Postgres_0.0"
}
} }
} }

View File

@@ -1,9 +0,0 @@
using GeneralUtils
response = "trajectory_evaluation:\nThe trajectory is correct so far. The thought accurately reflects the user's question, and the action taken is a valid attempt to retrieve data from the database that matches the specified criteria.\n\nanswer_evaluation:\nThe observation provides information about two red wines from Bordeaux rive droite in France, which partially answers the question. However, it does not provide a complete answer as it only lists the wine names and characteristics, but does not explicitly state whether there are any other wines that match the criteria.\n\naccepted_as_answer: No\n\nscore: 6\nThe trajectory is mostly correct, but the observation does not fully address the question.\n\nsuggestion: Consider adding more filters or parameters to the database query to retrieve a complete list of wines that match the specified criteria."
responsedict = GeneralUtils.textToDict(response,
["trajectory_evaluation", "answer_evaluation", "accepted_as_answer", "score", "suggestion"],
rightmarker=":", symbolkey=true)

0
test/runtests.jl Normal file
View File

View File

@@ -1,272 +1,294 @@
using Revise using Revise
using JSON, JSON3, Dates, UUIDs, PrettyPrinting, LibPQ, Base64, DataFrames using JSON, JSON3, Dates, UUIDs, PrettyPrinting, LibPQ, Base64, DataFrames
using YiemAgent, GeneralUtils using YiemAgent, GeneralUtils
using Base.Threads using Base.Threads
# ---------------------------------------------- 100 --------------------------------------------- # # ---------------------------------------------- 100 --------------------------------------------- #
# load config # load config
config = JSON3.read("./test/config.json") config = JSON3.read("/appfolder/app/dev/YiemAgent/test/config.json")
# config = copy(JSON3.read("../mountvolume/config.json")) # config = copy(JSON3.read("../mountvolume/config.json"))
function executeSQL(sql::T) where {T<:AbstractString} function executeSQL(sql::T) where {T<:AbstractString}
DBconnection = LibPQ.Connection("host=192.168.88.12 port=10201 dbname=wineDB user=yiemtechnologies password=yiemtechnologies@Postgres_0.0") host = config[:externalservice][:wineDB][:host]
result = LibPQ.execute(DBconnection, sql) port = config[:externalservice][:wineDB][:port]
close(DBconnection) dbname = config[:externalservice][:wineDB][:dbname]
return result user = config[:externalservice][:wineDB][:user]
end password = config[:externalservice][:wineDB][:password]
DBconnection = LibPQ.Connection("host=$host port=$port dbname=$dbname user=$user password=$password")
function executeSQLVectorDB(sql) result = LibPQ.execute(DBconnection, sql)
DBconnection = LibPQ.Connection("host=192.168.88.12 port=10203 dbname=SQLVectorDB user=yiemtechnologies password=yiemtechnologies@Postgres_0.0") close(DBconnection)
result = LibPQ.execute(DBconnection, sql) return result
close(DBconnection) end
return result
end function executeSQLVectorDB(sql)
host = config[:externalservice][:SQLVectorDB][:host]
function text2textInstructLLM(prompt::String) port = config[:externalservice][:SQLVectorDB][:port]
msgMeta = GeneralUtils.generate_msgMeta( dbname = config[:externalservice][:SQLVectorDB][:dbname]
config[:externalservice][:text2textinstruct][:mqtttopic]; user = config[:externalservice][:SQLVectorDB][:user]
msgPurpose="inference", password = config[:externalservice][:SQLVectorDB][:password]
senderName="yiemagent", DBconnection = LibPQ.Connection("host=$host port=$port dbname=$dbname user=$user password=$password")
senderId=string(uuid4()), result = LibPQ.execute(DBconnection, sql)
receiverName="text2textinstruct", close(DBconnection)
mqttBrokerAddress=config[:mqttServerInfo][:broker], return result
mqttBrokerPort=config[:mqttServerInfo][:port], end
)
function text2textInstructLLM(prompt::String; maxattempt::Integer=3, modelsize::String="medium",
outgoingMsg = Dict( llmkwargs=Dict(
:msgMeta => msgMeta, :num_ctx => 32768,
:payload => Dict( :temperature => 0.1,
:text => prompt, )
:kwargs => Dict( )
:num_ctx => 16384, msgMeta = GeneralUtils.generate_msgMeta(
:temperature => 0.2, config[:externalservice][:loadbalancer][:mqtttopic];
) msgPurpose="inference",
) senderName="yiemagent",
) senderId=sessionId,
receiverName="text2textinstruct_$modelsize",
_response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=6000) mqttBrokerAddress=config[:mqttServerInfo][:broker],
response = _response[:response][:text] mqttBrokerPort=config[:mqttServerInfo][:port],
)
return response
end outgoingMsg = Dict(
:msgMeta => msgMeta,
# get text embedding from a LLM service :payload => Dict(
function getEmbedding(text::T) where {T<:AbstractString} :text => prompt,
msgMeta = GeneralUtils.generate_msgMeta( :kwargs => llmkwargs
config[:externalservice][:text2textinstruct][:mqtttopic]; )
msgPurpose="embedding", )
senderName="yiemagent",
senderId=string(uuid4()), response = nothing
receiverName="text2textinstruct", for attempts in 1:maxattempt
mqttBrokerAddress=config[:mqttServerInfo][:broker], _response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=180, maxattempt=maxattempt)
mqttBrokerPort=config[:mqttServerInfo][:port], payload = _response[:response]
) if _response[:success] && payload[:text] !== nothing
response = _response[:response][:text]
outgoingMsg = Dict( break
:msgMeta => msgMeta, else
:payload => Dict( println("\n<text2textInstructLLM()> attempt $attempts/$maxattempt failed ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
:text => [text] # must be a vector of string pprintln(outgoingMsg)
) println("</text2textInstructLLM()> attempt $attempts/$maxattempt failed ", @__FILE__, ":", @__LINE__, " $(Dates.now())\n")
) sleep(3)
response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=6000) end
embedding = response[:response][:embeddings] end
return embedding
end return response
end
function findSimilarTextFromVectorDB(text::T1, tablename::T2, embeddingColumnName::T3,
vectorDB::Function; limit::Integer=1 # get text embedding from a LLM service
)::DataFrame where {T1<:AbstractString, T2<:AbstractString, T3<:AbstractString} function getEmbedding(text::T) where {T<:AbstractString}
msgMeta = GeneralUtils.generate_msgMeta(
# get embedding from LLM service config[:externalservice][:loadbalancer][:mqtttopic];
embedding = getEmbedding(text)[1] msgPurpose="embedding",
senderName="yiemagent",
# check whether there is close enough vector already store in vectorDB. if no, add, else skip senderId=sessionId,
sql = """ receiverName="textembedding",
SELECT *, $embeddingColumnName <-> '$embedding' as distance mqttBrokerAddress=config[:mqttServerInfo][:broker],
FROM $tablename mqttBrokerPort=config[:mqttServerInfo][:port],
ORDER BY distance LIMIT $limit; )
"""
response = vectorDB(sql) outgoingMsg = Dict(
df = DataFrame(response) :msgMeta => msgMeta,
return df :payload => Dict(
end :text => [text] # must be a vector of string
)
)
function similarSQLVectorDB(query; maxdistance::Integer=100)
tablename = "sqlllm_decision_repository" response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120, maxattempt=3)
# get embedding of the query embedding = response[:response][:embeddings]
df = findSimilarTextFromVectorDB(query, tablename, return embedding
"function_input_embedding", executeSQLVectorDB) end
row, col = size(df)
distance = row == 0 ? Inf : df[1, :distance] function findSimilarTextFromVectorDB(text::T1, tablename::T2, embeddingColumnName::T3,
if row != 0 && distance < maxdistance vectorDB::Function; limit::Integer=1
# if there is usable SQL, return it. )::DataFrame where {T1<:AbstractString, T2<:AbstractString, T3<:AbstractString}
output_b64 = df[1, :function_output_base64] # pick the closest match # get embedding from LLM service
output_str = String(base64decode(output_b64)) embedding = getEmbedding(text)[1]
rowid = df[1, :id] # check whether there is close enough vector already store in vectorDB. if no, add, else skip
println("\n~~~ found similar sql. row id $rowid, distance $distance ", @__FILE__, " ", @__LINE__) sql = """
return (dict=output_str, distance=distance) SELECT *, $embeddingColumnName <-> '$embedding' as distance
else FROM $tablename
println("\n~~~ similar sql not found, max distance $maxdistance ", @__FILE__, " ", @__LINE__) ORDER BY distance LIMIT $limit;
return (dict=nothing, distance=nothing) """
end response = vectorDB(sql)
end df = DataFrame(response)
return df
end
function insertSQLVectorDB(query::T1, SQL::T2; maxdistance::Integer=1) where {T1<:AbstractString, T2<:AbstractString}
tablename = "sqlllm_decision_repository" function similarSQLVectorDB(query; maxdistance::Integer=100)
# get embedding of the query tablename = "sqlllm_decision_repository"
# query = state[:thoughtHistory][:question] # get embedding of the query
df = findSimilarTextFromVectorDB(query, tablename, df = findSimilarTextFromVectorDB(query, tablename,
"function_input_embedding", executeSQLVectorDB) "function_input_embedding", executeSQLVectorDB)
row, col = size(df) # println(df[1, [:id, :function_output]])
distance = row == 0 ? Inf : df[1, :distance] row, col = size(df)
if row == 0 || distance > maxdistance # no close enough SQL stored in the database distance = row == 0 ? Inf : df[1, :distance]
query_embedding = getEmbedding(query)[1] # distance = 100 # CHANGE this is for testing only
query = replace(query, "'" => "") if row != 0 && distance < maxdistance
sql_base64 = base64encode(SQL) # if there is usable SQL, return it.
sql_ = replace(SQL, "'" => "") output_b64 = df[1, :function_output_base64] # pick the closest match
output_str = String(base64decode(output_b64))
sql = """ rowid = df[1, :id]
INSERT INTO $tablename (function_input, function_output, function_output_base64, function_input_embedding) VALUES ('$query', '$sql_', '$sql_base64', '$query_embedding'); println("\n~~~ found similar sql. row id $rowid, distance $distance ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
""" return (dict=output_str, distance=distance)
println("\n~~~ added new decision to vectorDB ", @__FILE__, " ", @__LINE__) else
println(sql) println("\n~~~ similar sql not found, max distance $maxdistance ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
_ = executeSQLVectorDB(sql) return (dict=nothing, distance=nothing)
end end
end end
function insertSQLVectorDB(query::T1, SQL::T2; maxdistance::Integer=3) where {T1<:AbstractString, T2<:AbstractString}
function similarSommelierDecision(recentevents::T1; maxdistance::Integer=5 tablename = "sqlllm_decision_repository"
)::Union{AbstractDict, Nothing} where {T1<:AbstractString} # get embedding of the query
tablename = "sommelier_decision_repository" # query = state[:thoughtHistory][:question]
# find similar df = findSimilarTextFromVectorDB(query, tablename,
println("\n~~~ search vectorDB for this: $recentevents ", @__FILE__, " ", @__LINE__) "function_input_embedding", executeSQLVectorDB)
df = findSimilarTextFromVectorDB(recentevents, tablename, row, col = size(df)
"function_input_embedding", executeSQLVectorDB) distance = row == 0 ? Inf : df[1, :distance]
row, col = size(df) if row == 0 || distance > maxdistance # no close enough SQL stored in the database
distance = row == 0 ? Inf : df[1, :distance] query_embedding = getEmbedding(query)[1]
if row != 0 && distance < maxdistance query = replace(query, "'" => "")
# if there is usable decision, return it. sql_base64 = base64encode(SQL)
rowid = df[1, :id] sql_ = replace(SQL, "'" => "")
println("\n~~~ found similar decision. row id $rowid, distance $distance ", @__FILE__, " ", @__LINE__)
output_b64 = df[1, :function_output_base64] # pick the closest match sql = """
_output_str = String(base64decode(output_b64)) INSERT INTO $tablename (function_input, function_output, function_output_base64, function_input_embedding) VALUES ('$query', '$sql_', '$sql_base64', '$query_embedding');
output = copy(JSON3.read(_output_str)) """
return output # println("\n~~~ added new decision to vectorDB ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
else # println(sql)
println("\n~~~ similar decision not found, max distance $maxdistance ", @__FILE__, " ", @__LINE__) _ = executeSQLVectorDB(sql)
return nothing end
end end
end
function similarSommelierDecision(recentevents::T1; maxdistance::Integer=3
function insertSommelierDecision(recentevents::T1, decision::T2; maxdistance::Integer=5 )::Union{AbstractDict, Nothing} where {T1<:AbstractString}
) where {T1<:AbstractString, T2<:AbstractDict} tablename = "sommelier_decision_repository"
tablename = "sommelier_decision_repository" # find similar
# find similar println("\n~~~ search vectorDB for this: $recentevents ", @__FILE__, " ", @__LINE__)
df = findSimilarTextFromVectorDB(recentevents, tablename, df = findSimilarTextFromVectorDB(recentevents, tablename,
"function_input_embedding", executeSQLVectorDB) "function_input_embedding", executeSQLVectorDB)
row, col = size(df) row, col = size(df)
distance = row == 0 ? Inf : df[1, :distance] distance = row == 0 ? Inf : df[1, :distance]
if row == 0 || distance > maxdistance # no close enough SQL stored in the database if row != 0 && distance < maxdistance
recentevents_embedding = a.func[:getEmbedding](recentevents)[1] # if there is usable decision, return it.
recentevents = replace(recentevents, "'" => "") rowid = df[1, :id]
decision_json = JSON3.write(decision) println("\n~~~ found similar decision. row id $rowid, distance $distance ", @__FILE__, " ", @__LINE__)
decision_base64 = base64encode(decision_json) output_b64 = df[1, :function_output_base64] # pick the closest match
decision = replace(decision_json, "'" => "") _output_str = String(base64decode(output_b64))
output = copy(JSON3.read(_output_str))
sql = """ return output
INSERT INTO $tablename (function_input, function_output, function_output_base64, function_input_embedding) VALUES ('$recentevents', '$decision', '$decision_base64', '$recentevents_embedding'); else
""" println("\n~~~ similar decision not found, max distance $maxdistance ", @__FILE__, " ", @__LINE__)
println("\n~~~ added new decision to vectorDB ", @__FILE__, " ", @__LINE__) return nothing
println(sql) end
_ = executeSQLVectorDB(sql) end
else
println("~~~ similar decision previously cached, distance $distance ", @__FILE__, " ", @__LINE__)
end function insertSommelierDecision(recentevents::T1, decision::T2; maxdistance::Integer=5
end ) where {T1<:AbstractString, T2<:AbstractDict}
tablename = "sommelier_decision_repository"
# find similar
sessionId = "12345" df = findSimilarTextFromVectorDB(recentevents, tablename,
"function_input_embedding", executeSQLVectorDB)
externalFunction = ( row, col = size(df)
getEmbedding=getEmbedding, distance = row == 0 ? Inf : df[1, :distance]
text2textInstructLLM=text2textInstructLLM, if row == 0 || distance > maxdistance # no close enough SQL stored in the database
executeSQL=executeSQL, recentevents_embedding = getEmbedding(recentevents)[1]
similarSQLVectorDB=similarSQLVectorDB, recentevents = replace(recentevents, "'" => "")
insertSQLVectorDB=insertSQLVectorDB, decision_json = JSON3.write(decision)
similarSommelierDecision=similarSommelierDecision, decision_base64 = base64encode(decision_json)
insertSommelierDecision=insertSommelierDecision, decision = replace(decision_json, "'" => "")
)
sql = """
INSERT INTO $tablename (function_input, function_output, function_output_base64, function_input_embedding) VALUES ('$recentevents', '$decision', '$decision_base64', '$recentevents_embedding');
"""
a = YiemAgent.sommelier( println("\n~~~ added new decision to vectorDB ", @__FILE__, " ", @__LINE__)
externalFunction; println(sql)
name="Ton", _ = executeSQLVectorDB(sql)
id=sessionId, # agent instance id else
retailername="Yiem", println("~~~ similar decision previously cached, distance $distance ", @__FILE__, " ", @__LINE__)
) end
end
while true
println("your respond: ")
user_answer = readline() sessionId = "12345"
response = YiemAgent.conversation(a, Dict(:text=> user_answer))
println("\n$response") externalFunction = (
end getEmbedding=getEmbedding,
text2textInstructLLM=text2textInstructLLM,
executeSQL=executeSQL,
# response = YiemAgent.conversation(a, Dict(:text=> "I want to get a French red wine under 100.")) similarSQLVectorDB=similarSQLVectorDB,
insertSQLVectorDB=insertSQLVectorDB,
similarSommelierDecision=similarSommelierDecision,
insertSommelierDecision=insertSommelierDecision,
)
a = YiemAgent.sommelier(
externalFunction;
name="Ton",
id=sessionId, # agent instance id
retailername="Yiem",
)
while true
print("\nyour respond: ")
user_answer = readline()
response = YiemAgent.conversation(a, Dict(:text=> user_answer))
println("\n$response")
end
# response = YiemAgent.conversation(a, Dict(:text=> "I want to get a French red wine under 100."))
"""
hello I want to get a bottle of red wine for my boss. I have a budget around 50 dollars. Show me some options.
I have no idea about his wine taste but he likes spicy food.
"""