Merge pull request 'v0.2.0' (#4) from v0.2.0 into main

Reviewed-on: #4
ton
2025-05-02 08:21:05 +00:00
8 changed files with 1055 additions and 1050 deletions

View File

@@ -1,8 +1,8 @@
# This file is machine-generated - editing it directly is not advised
julia_version = "1.11.2"
julia_version = "1.11.4"
manifest_format = "2.0"
project_hash = "b483014657ef9f0fde60d7258585b291d6f0eeca"
project_hash = "cb7f3c57318e927e8ac4dc2dea9acdcace566ed1"
[[deps.AliasTables]]
deps = ["PtrArrays", "Random"]
@@ -120,9 +120,9 @@ version = "1.11.0"
[[deps.Distributions]]
deps = ["AliasTables", "FillArrays", "LinearAlgebra", "PDMats", "Printf", "QuadGK", "Random", "SpecialFunctions", "Statistics", "StatsAPI", "StatsBase", "StatsFuns"]
git-tree-sha1 = "3101c32aab536e7a27b1763c0797dba151b899ad"
git-tree-sha1 = "0b4190661e8a4e51a842070e7dd4fae440ddb7f4"
uuid = "31c24e10-a181-5473-b8eb-7969acd0382f"
version = "0.25.113"
version = "0.25.118"
[deps.Distributions.extensions]
DistributionsChainRulesCoreExt = "ChainRulesCore"
@@ -158,9 +158,9 @@ version = "0.1.10"
[[deps.FileIO]]
deps = ["Pkg", "Requires", "UUIDs"]
git-tree-sha1 = "2dd20384bf8c6d411b5c7370865b1e9b26cb2ea3"
git-tree-sha1 = "b66970a70db13f45b7e57fbda1736e1cf72174ea"
uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549"
version = "1.16.6"
version = "1.17.0"
weakdeps = ["HTTP"]
[deps.FileIO.extensions]
@@ -168,9 +168,9 @@ weakdeps = ["HTTP"]
[[deps.FilePathsBase]]
deps = ["Compat", "Dates"]
git-tree-sha1 = "7878ff7172a8e6beedd1dea14bd27c3c6340d361"
git-tree-sha1 = "3bab2c5aa25e7840a4b065805c0cdfc01f3068d2"
uuid = "48062228-2e41-5def-b9a4-89aafe57970f"
version = "0.9.22"
version = "0.9.24"
weakdeps = ["Mmap", "Test"]
[deps.FilePathsBase.extensions]
@@ -200,11 +200,9 @@ version = "1.11.0"
[[deps.GeneralUtils]]
deps = ["CSV", "DataFrames", "DataStructures", "Dates", "Distributions", "JSON3", "MQTTClient", "PrettyPrinting", "Random", "SHA", "UUIDs"]
git-tree-sha1 = "978d9a5c3fc30205dd72d4a2a2ed4fa85ebee5cf"
repo-rev = "main"
repo-url = "https://git.yiem.cc/ton/GeneralUtils"
path = "/appfolder/app/dev/GeneralUtils"
uuid = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
version = "0.1.0"
version = "0.2.3"
[[deps.HTTP]]
deps = ["Base64", "CodecZlib", "ConcurrentUtilities", "Dates", "ExceptionUnwrapping", "Logging", "LoggingExtras", "MbedTLS", "NetworkOptions", "OpenSSL", "PrecompileTools", "Random", "SimpleBufferStream", "Sockets", "URIs", "UUIDs"]
@@ -214,9 +212,9 @@ version = "1.10.13"
[[deps.HypergeometricFunctions]]
deps = ["LinearAlgebra", "OpenLibm_jll", "SpecialFunctions"]
git-tree-sha1 = "b1c2585431c382e3fe5805874bda6aea90a95de9"
git-tree-sha1 = "68c173f4f449de5b438ee67ed0c9c748dc31a2ec"
uuid = "34004b35-14d8-5ef3-9330-4cdb6864b03a"
version = "0.3.25"
version = "0.3.28"
[[deps.ICU_jll]]
deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
@@ -260,9 +258,9 @@ uuid = "41ab1584-1d38-5bbf-9106-f11c6c58b48f"
version = "1.3.0"
[[deps.IrrationalConstants]]
git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2"
git-tree-sha1 = "e2222959fbc6c19554dc15174c81bf7bf3aa691c"
uuid = "92d709cd-6900-40b7-9082-c6be49f344b6"
version = "0.2.2"
version = "0.2.4"
[[deps.IterTools]]
git-tree-sha1 = "42d5f897009e7ff2cf88db414a389e5ed1bdd023"
@@ -305,12 +303,10 @@ uuid = "b39eb1a6-c29a-53d7-8c32-632cd16f18da"
version = "1.19.3+0"
[[deps.LLMMCTS]]
deps = ["GeneralUtils", "JSON3"]
git-tree-sha1 = "d8c653b8fafbd3757b7332985efaf1fdb8b6fe97"
repo-rev = "main"
repo-url = "https://git.yiem.cc/ton/LLMMCTS"
deps = ["GeneralUtils", "JSON3", "PrettyPrinting"]
path = "/appfolder/app/dev/LLMMCTS"
uuid = "d76c5a4d-449e-4835-8cc4-dd86ec44f241"
version = "0.1.2"
version = "0.1.4"
[[deps.LaTeXStrings]]
git-tree-sha1 = "dda21b8cbd6a6c40d9d02a73230f9d70fed6918c"
@@ -370,9 +366,9 @@ version = "1.11.0"
[[deps.LogExpFunctions]]
deps = ["DocStringExtensions", "IrrationalConstants", "LinearAlgebra"]
git-tree-sha1 = "a2d09619db4e765091ee5c6ffe8872849de0feea"
git-tree-sha1 = "13ca9e2586b89836fd20cccf56e57e2b9ae7f38f"
uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688"
version = "0.3.28"
version = "0.3.29"
[deps.LogExpFunctions.extensions]
LogExpFunctionsChainRulesCoreExt = "ChainRulesCore"
@@ -475,7 +471,7 @@ version = "0.3.27+1"
[[deps.OpenLibm_jll]]
deps = ["Artifacts", "Libdl"]
uuid = "05823500-19ac-5b8b-9628-191a04bc5112"
version = "0.8.1+2"
version = "0.8.1+4"
[[deps.OpenSSL]]
deps = ["BitFlags", "Dates", "MozillaCACerts_jll", "OpenSSL_jll", "Sockets"]
@@ -493,7 +489,7 @@ version = "3.0.15+1"
deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1"
uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
version = "0.5.5+0"
version = "0.5.5+2"
[[deps.OrderedCollections]]
git-tree-sha1 = "12f1439c4f986bb868acda6ea33ebc78e19b95ad"
@@ -502,9 +498,9 @@ version = "1.7.0"
[[deps.PDMats]]
deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"]
git-tree-sha1 = "949347156c25054de2db3b166c52ac4728cbad65"
git-tree-sha1 = "48566789a6d5f6492688279e22445002d171cf76"
uuid = "90014a1f-27ba-587c-ab20-58faa44d9150"
version = "0.11.31"
version = "0.11.33"
[[deps.Parsers]]
deps = ["Dates", "PrecompileTools", "UUIDs"]
@@ -556,15 +552,15 @@ uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
version = "1.11.0"
[[deps.PtrArrays]]
git-tree-sha1 = "77a42d78b6a92df47ab37e177b2deac405e1c88f"
git-tree-sha1 = "1d36ef11a9aaf1e8b74dacc6a731dd1de8fd493d"
uuid = "43287f4e-b6f4-7ad1-bb20-aadabca52c3d"
version = "1.2.1"
version = "1.3.0"
[[deps.QuadGK]]
deps = ["DataStructures", "LinearAlgebra"]
git-tree-sha1 = "cda3b045cf9ef07a08ad46731f5a3165e56cf3da"
git-tree-sha1 = "9da16da70037ba9d701192e27befedefb91ec284"
uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc"
version = "2.11.1"
version = "2.11.2"
[deps.QuadGK.extensions]
QuadGKEnzymeExt = "Enzyme"
@@ -623,11 +619,9 @@ version = "0.7.0"
[[deps.SQLLLM]]
deps = ["CSV", "DataFrames", "DataStructures", "Dates", "FileIO", "GeneralUtils", "HTTP", "JSON3", "LLMMCTS", "LibPQ", "PrettyPrinting", "Random", "Revise", "StatsBase", "Tables", "URIs", "UUIDs"]
git-tree-sha1 = "45e660e44de0950a5e5f92d467298d8b768b6023"
repo-rev = "main"
repo-url = "https://git.yiem.cc/ton/SQLLLM"
path = "/appfolder/app/dev/SQLLLM"
uuid = "2ebc79c7-cc10-4a3a-9665-d2e1d61e63d3"
version = "0.2.0"
version = "0.2.4"
[[deps.SQLStrings]]
git-tree-sha1 = "55de0530689832b1d3d43491ee6b67bd54d3323c"
@@ -672,9 +666,9 @@ version = "1.11.0"
[[deps.SpecialFunctions]]
deps = ["IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"]
git-tree-sha1 = "2f5d4697f21388cbe1ff299430dd169ef97d7e14"
git-tree-sha1 = "64cca0c26b4f31ba18f13f6c12af7c85f478cfde"
uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
version = "2.4.0"
version = "2.5.0"
[deps.SpecialFunctions.extensions]
SpecialFunctionsChainRulesCoreExt = "ChainRulesCore"
@@ -699,16 +693,16 @@ uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0"
version = "1.7.0"
[[deps.StatsBase]]
deps = ["DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "5cf7606d6cef84b543b483848d4ae08ad9832b21"
deps = ["AliasTables", "DataAPI", "DataStructures", "LinearAlgebra", "LogExpFunctions", "Missings", "Printf", "Random", "SortingAlgorithms", "SparseArrays", "Statistics", "StatsAPI"]
git-tree-sha1 = "29321314c920c26684834965ec2ce0dacc9cf8e5"
uuid = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
version = "0.34.3"
version = "0.34.4"
[[deps.StatsFuns]]
deps = ["HypergeometricFunctions", "IrrationalConstants", "LogExpFunctions", "Reexport", "Rmath", "SpecialFunctions"]
git-tree-sha1 = "b423576adc27097764a90e163157bcfc9acf0f46"
git-tree-sha1 = "35b09e80be285516e52c9054792c884b9216ae3c"
uuid = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
version = "1.3.2"
version = "1.4.0"
[deps.StatsFuns.extensions]
StatsFunsChainRulesCoreExt = "ChainRulesCore"

View File

@@ -1,9 +1,10 @@
name = "YiemAgent"
uuid = "e012c34b-7f78-48e0-971c-7abb83b6f0a2"
authors = ["narawat lamaiin <narawat@outlook.com>"]
version = "0.1.4"
version = "0.2.0"
[deps]
CSV = "336ed68f-0bac-5ca0-87d4-7b16caf5d00b"
DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
Dates = "ade2ca70-3891-5945-98fb-dc099432e06a"
@@ -21,7 +22,5 @@ URIs = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
[compat]
CSV = "0.10.15"
DataFrames = "1.7.0"
GeneralUtils = "0.1, 0.2"
LLMMCTS = "0.1.2"
SQLLLM = "0.2.0"

View File

@@ -0,0 +1,72 @@
To make **LLM-driven inference** fast while maintaining its dynamic capabilities, avoid a few practices that can create performance bottlenecks or inefficiencies. Here's what *not* to do:
---
### **1. Avoid Using Overly Large Models for Every Query**
While larger LLMs like GPT-4 provide high accuracy and nuanced responses, they may slow down real-time processing due to their computational complexity. Instead:
- Use distilled or smaller models (e.g., GPT-3.5 Turbo or fine-tuned versions) for faster inference without compromising much on quality.
---
### **2. Avoid Excessive Entity Preprocessing**
Don't rely on overly complicated preprocessing steps (like advanced NER models or regex-heavy pipelines) to extract entities from the query before invoking the LLM. This could add latency. Instead:
- Design efficient prompts that allow the LLM to extract entities and generate responses simultaneously.
---
### **3. Avoid Asking the LLM Multiple Separate Questions**
Running the LLM for multiple subtasks—for example, entity extraction first and response generation second—can significantly slow down the pipeline. Instead:
- Create prompts that combine tasks into one pass, e.g., *"Identify the city name and generate a weather response for this query: 'What's the weather in London?'"*.
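For illustration, the difference is one round trip instead of two (a sketch; `llm` is a hypothetical stand-in for any text-generation call, not part of this repo's API):

```julia
# Stub standing in for a real inference call; used only for illustration.
llm(prompt::AbstractString)::String = "..."

# Two passes (slower): one call to extract the entity, a second to answer.
city   = llm("Extract the city name from: 'What's the weather in London?'")
answer = llm("Generate a weather response for the city: $city")

# One combined pass (faster): extraction and generation in a single call.
answer = llm("Identify the city name and generate a weather response for this query: 'What's the weather in London?'")
```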
---
### **4. Don't Overload the LLM with Context History**
Excessively lengthy conversation history or irrelevant context in your prompts can slow down inference times. Instead:
- Provide only the relevant context for each query, trimming unnecessary parts of the conversation.
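A minimal trimming sketch, assuming the chathistory layout used elsewhere in this PR (a `Vector` of `Dict`s with `:name`/`:text` keys) and a `maxHistoryMsg` cutoff:

```julia
# Keep only the most recent maxHistoryMsg messages when building the prompt.
function trimmedhistory(chathistory::Vector, maxHistoryMsg::Integer)
    n = length(chathistory)
    return chathistory[max(1, n - maxHistoryMsg + 1):n]
end
```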
---
### **5. Avoid Real-Time Dependence on External APIs**
Using external APIs to fetch supplementary data (e.g., weather details or location info) during every query can introduce latency. Instead:
- Pre-fetch API data asynchronously and use the LLM to integrate it dynamically into responses.
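A sketch of the overlap, assuming hypothetical `fetchweather` and `buildprompt` helpers plus the `llm` stub from above:

```julia
# Start the network call immediately; prompt preparation overlaps the fetch.
weather_task = Threads.@spawn fetchweather("London")
prompt = buildprompt(query)
# Block only at the point the data is actually needed.
weather = fetch(weather_task)
response = llm("$prompt\nCurrent weather: $weather")
```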
---
### **6. Avoid Running LLM on Underpowered Hardware**
Running inference on CPUs or low-spec GPUs will result in slower response times. Instead:
- Deploy the LLM on optimized infrastructure (e.g., high-performance GPUs like NVIDIA A100 or cloud platforms like Azure AI) to reduce latency.
---
### **7. Skip Lengthy Generative Prompts**
Avoid prompts that encourage the LLM to produce overly detailed or verbose responses, as these take longer to process. Instead:
- Use concise prompts that focus on generating actionable or succinct answers.
---
### **8. Don't Ignore Optimization Techniques**
Failing to optimize your LLM setup can drastically impact performance. For example:
- Avoid skipping techniques like model quantization (reducing numerical precision to speed up inference) or distillation (training smaller models).
---
### **9. Don't Neglect Response Caching**
Even if you want to avoid the sunk cost of a full caching system, dismissing lightweight caching entirely can hurt speed. Instead:
- Use temporary session-based caching for very frequent queries, without committing to a full-fledged cache infrastructure.
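A lightweight sketch of such session-scoped caching: a plain `Dict`, no external cache service (`llm` again stands in for the inference call):

```julia
const sessioncache = Dict{String, String}()

function cachedresponse(query::String)
    # get! runs the inference closure only on a cache miss and stores the result.
    return get!(sessioncache, query) do
        llm(query)
    end
end
```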
---
### **10. Avoid One-Size-Fits-All Solutions**
Applying the same LLM inference method to all queries—whether simple or complex—will waste processing resources. Instead:
- Route basic queries to faster, specialized models and use the LLM for nuanced or multi-step queries only.
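A routing sketch; the word-count heuristic and the two model calls (`smallmodel`, `largemodel`) are illustrative assumptions, not part of any real API:

```julia
function route(query::String)
    if length(split(query)) < 8   # crude proxy for query complexity
        return smallmodel(query)  # fast, specialized model
    else
        return largemodel(query)  # full LLM for nuanced, multi-step queries
    end
end
```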
---
### Summary: Focus on Efficient Design
By avoiding these pitfalls, you can ensure that LLM-driven inference remains fast and responsive:
- Optimize prompts.
- Use smaller models for simpler queries.
- Run the LLM on high-performance hardware.
- Trim unnecessary preprocessing or contextual steps.
Would you like me to help refine a prompt or suggest specific tools to complement your implementation? Let me know!

File diff suppressed because it is too large

View File

@@ -296,13 +296,14 @@ function checkinventory(a::T1, input::T2
wineattributes_2 = extractWineAttributes_2(a, input)
_inventoryquery = "retailer name: $(a.retailername), $wineattributes_1, $wineattributes_2"
inventoryquery = "Retrieves winery, wine_name, vintage, region, country, wine_type, grape, serving_temperature, sweetness, intensity, tannin, acidity, tasting_notes, price and currency of wines that match the following criteria - {$_inventoryquery}"
inventoryquery = "Retrieves winery, wine_name, wine_id, vintage, region, country, wine_type, grape, serving_temperature, sweetness, intensity, tannin, acidity, tasting_notes, price and currency of wines that match the following criteria - {$_inventoryquery}"
println("\ncheckinventory input: $inventoryquery ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
# add support for similarSQLVectorDB
textresult, rawresponse = SQLLLM.query(inventoryquery, a.func[:executeSQL],
a.func[:text2textInstructLLM],
a.func[:text2textInstructLLM];
insertSQLVectorDB=a.func[:insertSQLVectorDB],
similarSQLVectorDB=a.func[:similarSQLVectorDB])
similarSQLVectorDB=a.func[:similarSQLVectorDB],
llmFormatName="qwen3")
println("\ncheckinventory result ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
println(textresult)
@@ -330,7 +331,8 @@ julia>
# Signature
"""
function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<:AbstractString}
function extractWineAttributes_1(a::T1, input::T2; maxattempt=10
)::String where {T1<:agent, T2<:AbstractString}
systemmsg =
"""
@@ -384,19 +386,24 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
header = ["Thought:", "Wine_name:", "Winery:", "Vintage:", "Region:", "Country:", "Wine_type:", "Grape_varietal:", "Tasting_notes:", "Wine_price:", "Occasion:", "Food_to_be_paired_with_wine:"]
dictkey = ["thought", "wine_name", "winery", "vintage", "region", "country", "wine_type", "grape_varietal", "tasting_notes", "wine_price", "occasion", "food_to_be_paired_with_wine"]
errornote = ""
errornote = "N/A"
for attempt in 1:10
#[WORKING] I should add generatequestion()
llmkwargs=Dict(
:num_ctx => 32768,
:temperature => 0.5,
)
for attempt in 1:maxattempt
#[PENDING] I should add generatequestion()
if attempt > 1
println("\nYiemAgent extractWineAttributes_1() attempt $attempt/10 ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
println("\nYiemAgent extractWineAttributes_1() attempt $attempt/$maxattempt ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
end
usermsg =
"""
User's query: $input
$errornote
P.S. $errornote
"""
_prompt =
@@ -406,29 +413,33 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
response = a.func[:text2textInstructLLM](prompt)
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
response = a.func[:text2textInstructLLM](prompt;
modelsize="medium", llmkwargs=llmkwargs, senderId=a.id)
response = GeneralUtils.remove_french_accents(response)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
# check whether all attributes are in the response
checkFlag = false
for word in header
if !occursin(word, response)
errornote = "$word attribute is missing in previous attempts"
println("Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
errornote = "In your previous attempts, the $word attribute is missing. Please try again."
println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
checkFlag = true
break
end
end
checkFlag == true ? continue : nothing
# check whether response has all header
# check whether response has all answer's key points
detected_kw = GeneralUtils.detect_keyword(header, response)
if 0 ∈ values(detected_kw)
errornote = "\nYiemAgent extractWineAttributes_1() response does not have all header"
errornote = "In your previous attempts, the response does not have all answer's key points"
println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
elseif sum(values(detected_kw)) > length(header)
errornote = "\nYiemAgent extractWineAttributes_1() response has duplicated header"
errornote = "In your previous attempts, the response has duplicated answer's key points"
println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
responsedict = GeneralUtils.textToDict(response, header;
@@ -452,8 +463,8 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
if responsedict[:wine_price] != "NA"
# check whether wine_price is in ranged number
if !occursin('-', responsedict[:wine_price])
errornote = "wine_price must be a range number"
println("ERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
errornote = "In your previous attempt, the 'wine_price' was not set to a ranged number. Please adjust it accordingly."
println("\nERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
checkFlag = true
break
end
@@ -467,8 +478,8 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
end
# price range like 100-100 is not good
if minprice == maxprice
errornote = "wine_price with minimum equals to maximum is not valid"
println("ERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
errornote = "In your previous attempt, you inputted 'wine_price' with a 'minimum' value equaling the 'maximum', which is not valid."
println("\nERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
checkFlag = true
break
end
@@ -568,14 +579,12 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
The preference form requires the following information:
sweetness, acidity, tannin, intensity
<You must follow the following guidelines>
You must follow the following guidelines:
1) If specific information required in the preference form is not available in the query or there isn't any, mark with 'NA' to indicate this.
Additionally, words like 'any' or 'unlimited' mean no information is available.
2) Use the conversion table to convert the descriptive word level of sweetness, intensity, tannin, and acidity into a corresponding integer.
3) Do not generate other comments.
</You must follow the following guidelines>
<You should then respond to the user with>
You should then respond to the user with:
Sweetness_keyword: The exact keywords in the user's query describing the sweetness level of the wine.
Sweetness: ( S ), where ( S ) represents integers indicating the range of sweetness levels. Example: 1-2
Acidity_keyword: The exact keywords in the user's query describing the acidity level of the wine.
@@ -584,9 +593,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
Tannin: ( T ), where ( T ) represents integers indicating the range of tannin level. Example: 1-3
Intensity_keyword: The exact keywords in the user's query describing the intensity level of the wine.
Intensity: ( I ), where ( I ) represents integers indicating the range of intensity level. Example: 2-4
</You should then respond to the user with>
<You should only respond in format as described below>
You should only respond in format as described below:
Sweetness_keyword: ...
Sweetness: ...
Acidity_keyword: ...
@@ -595,9 +602,8 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
Tannin: ...
Intensity_keyword: ...
Intensity: ...
</You should only respond in format as described below>
<Here are some examples>
Here are some examples:
User's query: I want a wine with a medium-bodied, low acidity, medium tannin.
Sweetness_keyword: NA
Sweetness: NA
@@ -617,7 +623,6 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
Tannin: NA
Intensity_keyword: NA
Intensity: NA
</Here are some examples>
Let's begin!
"""
@@ -630,7 +635,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
"""
$conversiontable
User's query: $input
$errornote
P.S. $errornote
"""
_prompt =
@@ -640,17 +645,20 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
response = a.func[:text2textInstructLLM](prompt)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
# check whether response has all header
# check whether response has all answer's key points
detected_kw = GeneralUtils.detect_keyword(header, response)
if 0 ∈ values(detected_kw)
errornote = "\nYiemAgent extractWineAttributes_2() response does not have all header"
errornote = "Your previous attempt does not have all of the answer's key points"
println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
elseif sum(values(detected_kw)) > length(header)
errornote = "\nYiemAgent extractWineAttributes_2() response has duplicated header"
errornote = "In your previous attempt has duplicated answer's key points"
println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
@@ -662,8 +670,8 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
keyword = Symbol(i * "_keyword") # e.g. sweetness_keyword
value = responsedict[keyword]
if value != "NA" && !occursin(value, input)
errornote = "WARNING. Keyword $keyword: $value does not appear in the input. You must use information from the input only"
println("Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
errornote = "In your previous attempt, keyword $keyword: $value does not appear in the input. You must use information from the input only"
println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
@@ -679,7 +687,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
if !occursin("keyword", string(k))
if v !== "NA" && (!occursin('-', v) || length(v) > 5)
errornote = "WARNING: The non-range value {$k: $v} is not allowed. It should be specified in a range format, i.e. min-max."
println("Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
end
@@ -755,7 +763,7 @@ function paraphrase(text2textInstructLLM::Function, text::String)
for attempt in 1:10
usermsg = """
Text: $text
$errornote
P.S. $errornote
"""
_prompt =
@@ -765,10 +773,11 @@ function paraphrase(text2textInstructLLM::Function, text::String)
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
try
response = text2textInstructLLM(prompt)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
# sometimes the model responds like this: "here's how I would respond: ..."
if occursin("respond:", response)
errornote = "You don't need to intro your response"
@@ -780,13 +789,13 @@ function paraphrase(text2textInstructLLM::Function, text::String)
response = replace(response, '`' => "")
response = GeneralUtils.remove_french_accents(response)
# check whether response has all header
# check whether response has all answer's key points
detected_kw = GeneralUtils.detect_keyword(header, response)
if 0 ∈ values(detected_kw)
errornote = "\nYiemAgent paraphrase() response does not have all header"
errornote = "\nYiemAgent paraphrase() response does not have all answer's key points"
continue
elseif sum(values(detected_kw)) > length(header)
errornote = "\nnYiemAgent paraphrase() response has duplicated header"
errornote = "\nnYiemAgent paraphrase() response has duplicated answer's key points"
continue
end
@@ -984,7 +993,7 @@ end
# ]
# # put in model format
# prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
# prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
# prompt *=
# """
# <|start_header_id|>assistant<|end_header_id|>

View File

@@ -9,11 +9,48 @@ using GeneralUtils
abstract type agent end
mutable struct companion <: agent
name::String # agent name
id::String # agent id
systemmsg::Union{String, Nothing}
systemmsg::String # system message
tools::Dict # tools
maxHistoryMsg::Integer # e.g. 21st and earlier messages will get summarized
chathistory::Vector{Dict{Symbol, Any}}
memory::Dict{Symbol, Any}
func::NamedTuple # NamedTuple of functions
llmFormatName::String
end
function companion(
func::NamedTuple # NamedTuple of functions
;
systemmsg::Union{String, Nothing}= nothing,
name::String= "Assistant",
id::String= GeneralUtils.uuid4snakecase(),
maxHistoryMsg::Integer= 20,
chathistory::Vector{Dict{Symbol, String}} = Vector{Dict{Symbol, String}}(),
llmFormatName::String= "granite3"
)
if systemmsg === nothing
systemmsg =
"""
Your name: $name
Your sex: Female
Your role: You are a helpful assistant.
You should follow the following guidelines:
- Focus on the latest conversation.
- Your like to be short and concise.
Let's begin!
"""
end
tools = Dict( # update input format
"CHATBOX"=> Dict(
:description => "- CHATBOX which you can use to talk with the user. The input is your intentions for the dialogue. Be specific.",
),
)
""" Memory
Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
@@ -22,38 +59,23 @@ mutable struct companion <: agent
Dict(:name=>"user", :text=> "Wassup!", :timestamp=> Dates.now()),
Dict(:name=>"assistant", :text=> "Hi I'm your assistant.", :timestamp=> Dates.now()),
]
"""
chathistory::Vector{Dict{Symbol, Any}}
memory::Dict{Symbol, Any}
# communication function
text2textInstructLLM::Function
end
function companion(
text2textInstructLLM::Function
;
id::String= string(uuid4()),
systemmsg::Union{String, Nothing}= nothing,
maxHistoryMsg::Integer= 20,
chathistory::Vector{Dict{Symbol, String}} = Vector{Dict{Symbol, String}}(),
)
memory = Dict{Symbol, Any}(
:chatbox=> "",
:shortmem=> OrderedDict{Symbol, Any}(),
:events=> Vector{Dict{Symbol, Any}}(),
:state=> Dict{Symbol, Any}(),
:state=> Dict{Symbol, Any}(), # state of the agent
:recap=> OrderedDict{Symbol, Any}(), # recap summary of the conversation
)
newAgent = companion(
name,
id,
systemmsg,
tools,
maxHistoryMsg,
chathistory,
memory,
text2textInstructLLM
func,
llmFormatName
)
return newAgent
@@ -61,6 +83,7 @@ end
""" A sommelier agent.
# Arguments
@@ -134,19 +157,10 @@ mutable struct sommelier <: agent
retailername::String
tools::Dict
maxHistoryMsg::Integer # e.g. 21st and earlier messages will get summarized
""" Memory
Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
NO "system" message in chathistory because I want to add it at the inference time
chathistory= [
Dict(:name=>"user", :text=> "Wassup!", :timestamp=> Dates.now()),
Dict(:name=>"assistant", :text=> "Hi I'm your assistant.", :timestamp=> Dates.now()),
]
"""
chathistory::Vector{Dict{Symbol, Any}}
memory::Dict{Symbol, Any}
func # NamedTuple of functions
llmFormatName::String
end
function sommelier(
@@ -157,6 +171,7 @@ function sommelier(
retailername::String= "retailer_name",
maxHistoryMsg::Integer= 20,
chathistory::Vector{Dict{Symbol, String}} = Vector{Dict{Symbol, String}}(),
llmFormatName::String= "granite3"
)
tools = Dict( # update input format
@@ -170,16 +185,17 @@ function sommelier(
:input => """<input>Input is a JSON-formatted string that contains a detailed and precise search query.</input><input example>{\"wine type\": \"rose\", \"price\": \"max 35\", \"sweetness level\": \"sweet\", \"intensity level\": \"light bodied\", \"Tannin level\": \"low\", \"Acidity level\": \"low\"}</input example>""",
:output => """<output>Output are wines that match the search query in JSON format.""",
),
# "finalanswer"=> Dict(
# :description => "<tool description>Useful for when you are ready to recommend wines to the user.</tool description>",
# :input => """<input format>{\"finalanswer\": \"some text\"}.</input format><input example>{\"finalanswer\": \"I recommend Zena Crown Vista\"}</input example>""",
# :output => "" ,
# :func => nothing,
# ),
)
""" Memory
Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
NO "system" message in chathistory because I want to add it at the inference time
chathistory= [
Dict(:name=>"user", :text=> "Wassup!", :timestamp=> Dates.now()),
Dict(:name=>"assistant", :text=> "Hi I'm your assistant.", :timestamp=> Dates.now()),
]
"""
memory = Dict{Symbol, Any}(
:chatbox=> "",
:shortmem=> OrderedDict{Symbol, Any}(
:available_wine=> [],
:found_wine=> [], # used by decisionMaker(). This is to prevent decisionMaker() from repeatedly presenting the same wines
@@ -198,7 +214,8 @@ function sommelier(
maxHistoryMsg,
chathistory,
memory,
func
func,
llmFormatName
)
return newAgent

View File

@@ -154,11 +154,11 @@ function chatHistoryToText(vecd::Vector; withkey=true, range=nothing)::String
# Loop through each dictionary in the input vector
for d in elements
# Extract the 'name' and 'text' keys from the dictionary
name = d[:name]
name = titlecase(d[:name])
_text = d[:text]
# Append the formatted string to the text variable
text *= "$name:> $_text \n"
text *= "$name> $_text \n"
end
else
# Loop through each dictionary in the input vector
@@ -239,7 +239,8 @@ function eventdict(;
outcome::Union{String, Nothing}=nothing,
note::Union{String, Nothing}=nothing,
)
return Dict{Symbol, Any}(
d = Dict{Symbol, Any}(
:event_description=> event_description,
:timestamp=> timestamp,
:subject=> subject,
@@ -252,6 +253,8 @@ function eventdict(;
:outcome=> outcome,
:note=> note,
)
return d
end
@@ -310,221 +313,34 @@ function createTimeline(events::T1; eventindex::Union{UnitRange, Nothing}=nothin
end
# """ Convert a single chat dictionary into LLM model instruct format.
# # Llama 3 instruct format example
# <|system|>
# You are a helpful AI assistant.<|end|>
# <|user|>
# I am going to Paris, what should I see?<|end|>
# <|assistant|>
# Paris, the capital of France, is known for its stunning architecture, art museums."<|end|>
# <|user|>
# What is so great about #1?<|end|>
# <|assistant|>
# # Arguments
# - `name::T`
#     message owner name e.f. "system", "user" or "assistant"
# - `text::T`
# # Return
# - `formattedtext::String`
#     text formatted to model format
# # Example
# ```jldoctest
# julia> using Revise
# julia> using YiemAgent
# julia> d = Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",)
# julia> formattedtext = YiemAgent.formatLLMtext_phi3instruct(d[:name], d[:text])
# ```
# Signature
# """
# function formatLLMtext_phi3instruct(name::T, text::T) where {T<:AbstractString}
#     formattedtext =
#         """
#         <|$name|>
#         $text<|end|>\n
#         """
#     return formattedtext
# end

# """ Convert a single chat dictionary into LLM model instruct format.
# # Llama 3 instruct format example
# <|begin_of_text|>
# <|start_header_id|>system<|end_header_id|>
# You are a helpful assistant.
# <|eot_id|>
# <|start_header_id|>user<|end_header_id|>
# Get me an icecream.
# <|eot_id|>
# <|start_header_id|>assistant<|end_header_id|>
# Go buy it yourself at 7-11.
# <|eot_id|>
# # Arguments
# - `name::T`
#     message owner name e.f. "system", "user" or "assistant"
# - `text::T`
# # Return
# - `formattedtext::String`
#     text formatted to model format
# # Example
# ```jldoctest
# julia> using Revise
# julia> using YiemAgent
# julia> d = Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",)
# julia> formattedtext = YiemAgent.formatLLMtext_llama3instruct(d[:name], d[:text])
# "<|begin_of_text|>\n <|start_header_id|>system<|end_header_id|>\n You are a helpful, respectful and honest assistant.\n <|eot_id|>\n"
# ```
# Signature
# """
# function formatLLMtext_llama3instruct(name::T, text::T) where {T<:AbstractString}
#     formattedtext =
#         if name == "system"
#             """
#             <|begin_of_text|>
#             <|start_header_id|>$name<|end_header_id|>
#             $text
#             <|eot_id|>
#             """
#         else
#             """
#             <|start_header_id|>$name<|end_header_id|>
#             $text
#             <|eot_id|>
#             """
#         end
#     return formattedtext
# end

# """ Convert a chat messages in vector of dictionary into LLM model instruct format.
# # Arguments
# - `messages::Vector{Dict{Symbol, T}}`
#     message owner name e.f. "system", "user" or "assistant"
# - `formatname::T`
#     format name to be used
# # Return
# - `formattedtext::String`
#     text formatted to model format
# # Example
# ```jldoctest
# julia> using Revise
# julia> using YiemAgent
# julia> chatmessage = [
#            Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",),
#            Dict(:name=> "user",:text=> "list me all planets in our solar system.",),
#            Dict(:name=> "assistant",:text=> "I'm sorry. I don't know. You tell me.",),
#        ]
# julia> formattedtext = YiemAgent.formatLLMtext(chatmessage, "llama3instruct")
# "<|begin_of_text|>\n <|start_header_id|>system<|end_header_id|>\n You are a helpful, respectful and honest assistant.\n <|eot_id|>\n <|start_header_id|>user<|end_header_id|>\n list me all planets in our solar system.\n <|eot_id|>\n <|start_header_id|>assistant<|end_header_id|>\n I'm sorry. I don't know. You tell me.\n <|eot_id|>\n"
# ```
# # Signature
# """
# function formatLLMtext(messages::Vector{Dict{Symbol, T}},
#     formatname::String="llama3instruct") where {T<:Any}
#     f = if formatname == "llama3instruct"
#         formatLLMtext_llama3instruct
#     elseif formatname == "mistral"
#         # not define yet
#     elseif formatname == "phi3instruct"
#         formatLLMtext_phi3instruct
#     else
#         error("$formatname template not define yet")
#     end
#     str = ""
#     for t in messages
#         str *= f(t[:name], t[:text])
#     end
#     # add <|assistant|> so that the model don't generate it and I don't need to clean it up later
#     if formatname == "phi3instruct"
#         str *= "<|assistant|>\n"
#     end
#     return str
# end

# """
# Arguments\n
# -----
# Return\n
# -----
# Example\n
# -----
# ```jldoctest
# julia>
# ```
# TODO\n
# -----
# [] update docstring
# [PENDING] implement the function
# Signature\n
# -----
# """
# function iterativeprompting(a::T, prompt::String, verification::Function) where {T<:agent}
#     msgMeta = GeneralUtils.generate_msgMeta(
#         a.config[:externalService][:text2textinstruct],
#         senderName= "iterativeprompting",
#         senderId= a.id,
#         receiverName= "text2textinstruct",
#     )
#     outgoingMsg = Dict(
#         :msgMeta=> msgMeta,
#         :payload=> Dict(
#             :text=> prompt,
#         )
#     )
#     success = nothing
#     result = nothing
#     critique = ""
#     # iteration loop
#     while true
#         # send prompt to LLM
#         response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
#         error("--> iterativeprompting")
#         # check for correctness and get feedback
#         success, _critique = verification(response)
#         if success
#             result = response
#             break
#         else
#             # add critique to prompt
#             critique *= _critique * "\n"
#             replace!(prompt, "Critique: ..." => "Critique: $critique")
#         end
#     end
#     return (success=success, result=result)
# end

# function createTimeline(events::T1; eventindex::Union{UnitRange, Nothing}=nothing
#     ) where {T1<:AbstractVector}
#     # Initialize empty timeline string
#     timeline = ""
#     # Determine which indices to use - either provided range or full length
#     ind =
#         if eventindex !== nothing
#             [eventindex...]
#         else
#             1:length(events)
#         end
#     # Iterate through events and format each one
#     for (i, event) in zip(ind, events)
#         # If no outcome exists, format without outcome
#         subject = titlecase(event[:subject])
#         if event[:outcome] === nothing
#             timeline *= "Event_$i) Who: $subject Action_name: $(event[:actionname]) Action_input: $(event[:actioninput])\n"
#         # If outcome exists, include it in formatting
#         else
#             timeline *= "Event_$i) Who: $subject Action_name: $(event[:actionname]) Action_input: $(event[:actioninput]) Action output: $(event[:outcome])\n"
#         end
#     end
#     # Return formatted timeline string
#     return timeline
# end
@@ -555,11 +371,6 @@ end

View File

@@ -36,7 +36,12 @@ function executeSQLVectorDB(sql)
return result
end
function text2textInstructLLM(prompt::String; maxattempt::Integer=2, modelsize::String="medium")
function text2textInstructLLM(prompt::String; maxattempt::Integer=3, modelsize::String="medium",
llmkwargs=Dict(
:num_ctx => 32768,
:temperature => 0.1,
)
)
msgMeta = GeneralUtils.generate_msgMeta(
config[:externalservice][:loadbalancer][:mqtttopic];
msgPurpose="inference",
@@ -51,10 +56,7 @@ function text2textInstructLLM(prompt::String; maxattempt::Integer=2, modelsize::
:msgMeta => msgMeta,
:payload => Dict(
:text => prompt,
:kwargs => Dict(
:num_ctx => 16384,
:temperature => 0.2,
)
:kwargs => llmkwargs
)
)
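For illustration, a caller can now override the defaults per request (a sketch assuming the signature above; the values shown are arbitrary):

```julia
response = text2textInstructLLM(prompt;
    modelsize="medium",
    llmkwargs=Dict(:num_ctx => 32768, :temperature => 0.5))
```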
@@ -195,7 +197,7 @@ function insertSommelierDecision(recentevents::T1, decision::T2; maxdistance::In
row, col = size(df)
distance = row == 0 ? Inf : df[1, :distance]
if row == 0 || distance > maxdistance # no close enough SQL stored in the database
recentevents_embedding = a.func[:getEmbedding](recentevents)[1]
recentevents_embedding = getEmbedding(recentevents)[1]
recentevents = replace(recentevents, "'" => "")
decision_json = JSON3.write(decision)
decision_base64 = base64encode(decision_json)