This commit is contained in:
narawat lamaiin
2025-05-01 07:59:37 +07:00
parent 03de659c9b
commit cccad676db
3 changed files with 92 additions and 86 deletions

View File

@@ -151,7 +151,7 @@ function decisionMaker(a::T; recent::Integer=10, maxattempt=10
end
end
recentrecap = GeneralUtils.dictToString_noKey(_recentrecap)
# recentrecap = GeneralUtils.dictToString_noKey(_recentrecap)
# similarDecision = a.func[:similarSommelierDecision](recentrecap)
similarDecision = nothing #CHANGE
@@ -179,14 +179,18 @@ function decisionMaker(a::T; recent::Integer=10, maxattempt=10
errornote = ""
response = nothing # placeholder for show when error msg show up
#[WORKING] add 1) 3 decisions samples 2) compare and choose the best decision (correct tolls etc)
#[PENDING] add 1) 3 decisions samples 2) compare and choose the best decision (correct tolls etc)
llmkwargs=Dict(
:num_ctx => 32768,
:temperature => 0.5,
)
for attempt in 1:maxattempt
if attempt > 1
println("\nYiemAgent decisionMaker() attempt $attempt/$maxattempt ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
llmkwargs[:temperature] += 0.1
end
QandA = generatequestion(a, a.func[:text2textInstructLLM]; recent=3)
QandA = generatequestion(a, a.func[:text2textInstructLLM], timeline)
systemmsg =
"""
Your name is $(a.name). You are a helpful English-speaking assistant, acting as a polite, website-based sommelier for $(a.retailername)'s wine store.
@@ -228,9 +232,9 @@ function decisionMaker(a::T; recent::Integer=10, maxattempt=10
You should then respond to the user with interleaving Thought, Plan, Action_name, Action_input:
1) Thought: Articulate your current understanding and consider the current situation.
2) Plan: Based on the current situation, state a complete action plan to complete the task. Be specific.
3) Action_name: (Typically corresponds to the execution of the first step in your plan) Can be one of the following function names:
3) Action_name: (Typically corresponds to the execution of the first step in your plan) Can be one of the following tool names:
- CHATBOX which you can use to talk with the user. The input is your intentions for the dialogue. Be specific.
- CHECKINVENTORY allows you to check information about wines you want in your inventory. The input should be a specific search term in verbal English. A good search term should include details such as winery, wine name, vintage, region, country, wine type, grape varietal, tasting notes, wine price, occasion, food to be paired with wine, intensity, tannin, sweetness, acidity.
- CHECKINVENTORY allows you to check information about wines you want in your inventory. The input should be a specific search term in verbal English. A good search term should include details such as price range, winery, wine name, vintage, region, country, wine type, grape varietal, tasting notes, occasion, food to be paired with wine, intensity, tannin, sweetness, acidity.
Invalid query example: red wine that pair well with spicy food.
- PRESENTBOX which you can use to present wines you have found in your inventory to the user. The input are wine names that you want to present.
- ENDCONVERSATION which you can use to properly end the conversation with the user. Input is "NA".
@@ -286,13 +290,13 @@ function decisionMaker(a::T; recent::Integer=10, maxattempt=10
# end
# change qwen format put in model format
prompt = GeneralUtils.formatLLMtext(unformatPrompt, "granite3")
response = a.func[:text2textInstructLLM](prompt)
prompt = GeneralUtils.formatLLMtext(unformatPrompt, a.llmFormatName)
response = a.func[:text2textInstructLLM](prompt; senderId=a.id, llmkwargs=llmkwargs)
response = GeneralUtils.remove_french_accents(response)
response = replace(response, "**"=>"")
response = replace(response, "***"=>"")
response = replace(response, "<|eot_id|>"=>"")
response = GeneralUtils.deFormatLLMtext(response, "granite3")
response = GeneralUtils.deFormatLLMtext(response, a.llmFormatName)
# check if response contain more than one functions from ["CHATBOX", "CHECKINVENTORY", "ENDCONVERSATION"]
count = 0
@@ -601,13 +605,13 @@ end
# # end
# # change qwen format put in model format
# prompt = GeneralUtils.formatLLMtext(unformatPrompt, "granite3")
# prompt = GeneralUtils.formatLLMtext(unformatPrompt, "qwen3")
# response = a.func[:text2textInstructLLM](prompt)
# response = GeneralUtils.remove_french_accents(response)
# response = replace(response, "**"=>"")
# response = replace(response, "***"=>"")
# response = replace(response, "<|eot_id|>"=>"")
# response = GeneralUtils.deFormatLLMtext(response, "granite3")
# response = GeneralUtils.deFormatLLMtext(response, "qwen3")
# # check if response contain more than one functions from ["CHATBOX", "CHECKINVENTORY", "ENDCONVERSATION"]
# count = 0
@@ -844,17 +848,17 @@ function evaluator(state::T1, text2textInstructLLM::Function
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
prompt = GeneralUtils.formatLLMtext(_prompt, "qwen3")
header = ["Trajectory_evaluation:", "Answer_evaluation:", "Accepted_as_answer:", "Score:", "Suggestion:"]
dictkey = ["trajectory_evaluation", "answer_evaluation", "accepted_as_answer", "score", "suggestion"]
response = text2textInstructLLM(prompt, modelsize="medium")
response = text2textInstructLLM(prompt; modelsize="medium", senderId=a.id)
# sometime LLM output something like **Comprehension**: which is not expected
response = replace(response, "**"=>"")
response = replace(response, "***"=>"")
response = GeneralUtils.deFormatLLMtext(response, "granite3")
response = GeneralUtils.deFormatLLMtext(response, "qwen3")
# check whether response has all header
detected_kw = GeneralUtils.detect_keyword(header, response)
@@ -1055,7 +1059,7 @@ julia>
# Signature
"""
function think(a::T)::NamedTuple{(:actionname, :result),Tuple{String,String}} where {T<:agent}
a.memory[:recap] = generateSituationReport(a, a.func[:text2textInstructLLM]; skiprecent=0)
# a.memory[:recap] = generateSituationReport(a, a.func[:text2textInstructLLM]; skiprecent=0)
thoughtDict = decisionMaker(a; recent=5)
actionname = thoughtDict[:action_name]
@@ -1240,10 +1244,10 @@ function presentbox(a::sommelier, thoughtDict)
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
response = a.func[:text2textInstructLLM](prompt)
prompt = GeneralUtils.formatLLMtext(_prompt, a.llmFormatName)
#[WORKING] update this function to use new llm
response = a.func[:text2textInstructLLM](prompt; senderId=a.id)
response = GeneralUtils.deFormatLLMtext(response, a.llmFormatName)
# check whether response has not-allowed words
notallowed = ["respond:", "user>", "user:"]
detected_kw = GeneralUtils.detect_keyword(notallowed, response)
@@ -1261,7 +1265,7 @@ function presentbox(a::sommelier, thoughtDict)
response = replace(response, '`' => "")
response = replace(response, "<|eot_id|>"=>"")
response = GeneralUtils.remove_french_accents(response)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
response = GeneralUtils.deFormatLLMtext(response, "qwen3")
# check whether response has all header
detected_kw = GeneralUtils.detect_keyword(header, response)
@@ -1309,17 +1313,16 @@ function presentbox(a::sommelier, thoughtDict)
# if wine is mentioned but not in timeline or shortmem,
# then the agent is not supposed to recommend the wine
if isWineInEvent == false
errornote = "Your previous response recommends wines that is not in your inventory which is not allowed"
errornote = "Your previous response recommended wines that is not in your inventory which is not allowed"
println("\nERROR YiemAgent presentbox() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
end
result = responsedict[:dialogue]
return result
end
error("generatechat failed to generate a response")
error("presentbox() failed to generate a response")
end
@@ -1426,9 +1429,9 @@ function generatechat(a::sommelier, thoughtDict)
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
prompt = GeneralUtils.formatLLMtext(_prompt, "qwen3")
response = a.func[:text2textInstructLLM](prompt; llmkwargs=llmkwargs)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
response = GeneralUtils.deFormatLLMtext(response, "qwen3")
# sometime the model response like this "here's how I would respond: ..."
if occursin("respond:", response)
@@ -1513,7 +1516,7 @@ function generatechat(a::companion; converPartnerName::Union{String, Nothing}=no
errornote = "N/A"
llmkwargs=Dict(
:num_ctx => 32768,
:temperature => 0.3,
:temperature => 0.5,
)
for attempt in 1:maxattempt
if attempt > 1
@@ -1587,8 +1590,7 @@ function generatechat(a::companion; converPartnerName::Union{String, Nothing}=no
end
function generatequestion(a, text2textInstructLLM::Function;
recent::Integer=5)::String
function generatequestion(a, text2textInstructLLM::Function, timeline)::String
systemmsg =
"""
@@ -1607,7 +1609,6 @@ function generatequestion(a, text2textInstructLLM::Function;
3) Answering questions or offering additional services beyond those related to your store's wine recommendations such as discounts, quantity, rewards programs, promotions, delivery options, shipping, boxes, gift wrapping, packaging, personalized messages or something similar. These are the job of our sales team at the store.
At each round of conversation, you will be given the info:
Recap: recap of what has happened so far
Additional info: ...
Your recent events: latest 5 events of the situation
@@ -1634,7 +1635,7 @@ function generatequestion(a, text2textInstructLLM::Function;
You should then respond to the user with:
1) Thought: State your thought about the current situation
2) Q: Given the situation, "ask yourself" at least five, but no more than ten, questions
2) Q: "Ask yourself" at least five, but no more than ten, questions about the situation from your perspective.
3) A: Given the situation, "answer to yourself" the best you can. Do not generate any extra text after you finish answering all questions
You must only respond in format as described below:
@@ -1704,32 +1705,32 @@ function generatequestion(a, text2textInstructLLM::Function;
"N/A"
end
recent_ind = GeneralUtils.recentElementsIndex(length(a.memory[:events]), recent)
recentevents = a.memory[:events][recent_ind]
timeline = createTimeline(recentevents; eventindex=recent_ind)
# recent_ind = GeneralUtils.recentElementsIndex(length(a.memory[:events]), recent)
# recentevents = a.memory[:events][recent_ind]
# timeline = createTimeline(recentevents; eventindex=recent_ind)
errornote = ""
response = nothing # store for show when error msg show up
recap =
if length(a.memory[:recap]) <= recent
"N/A"
else
recapkeys = keys(a.memory[:recap])
recapkeys_vec = [i for i in recapkeys]
recapkeys_vec = recapkeys_vec[1:end-recent]
tempmem = OrderedDict()
for (k, v) in a.memory[:recap]
if k ∈ recapkeys_vec
tempmem[k] = v
end
end
# recap =
# if length(a.memory[:recap]) <= recent
# "N/A"
# else
# recapkeys = keys(a.memory[:recap])
# recapkeys_vec = [i for i in recapkeys]
# recapkeys_vec = recapkeys_vec[1:end-recent]
# tempmem = OrderedDict()
# for (k, v) in a.memory[:recap]
# if k ∈ recapkeys_vec
# tempmem[k] = v
# end
# end
GeneralUtils.dictToString(tempmem)
end
# GeneralUtils.dictToString(tempmem)
# end
llmkwargs=Dict(
:num_ctx => 32768,
:temperature => 0.2,
:temperature => 0.5,
)
for attempt in 1:10
@@ -1740,7 +1741,6 @@ function generatequestion(a, text2textInstructLLM::Function;
usermsg =
"""
Recap: $recap)
Additional info: $context
Your recent events: $timeline
P.S. $errornote
@@ -1753,10 +1753,11 @@ function generatequestion(a, text2textInstructLLM::Function;
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
prompt = GeneralUtils.formatLLMtext(_prompt, "qwen3")
response = text2textInstructLLM(prompt, modelsize="medium", llmkwargs=llmkwargs)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
response = text2textInstructLLM(prompt;
modelsize="medium", llmkwargs=llmkwargs, senderId=a.id)
response = GeneralUtils.deFormatLLMtext(response, "qwen3")
# make sure generatequestion() don't have wine name that is not from retailer inventory
# check whether an agent recommend wines before checking inventory or recommend wines
# outside its inventory
@@ -1884,10 +1885,10 @@ function generateSituationReport(a, text2textInstructLLM::Function; skiprecent::
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
prompt = GeneralUtils.formatLLMtext(_prompt, "qwen3")
response = text2textInstructLLM(prompt)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
response = text2textInstructLLM(prompt; senderId=a.id)
response = GeneralUtils.deFormatLLMtext(response, "qwen3")
# check whether response has all header
detected_kw = GeneralUtils.detect_keyword(header, response)
@@ -1956,10 +1957,10 @@ function detectWineryName(a, text)
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
prompt = GeneralUtils.formatLLMtext(_prompt, "qwen3")
response = a.func[:text2textInstructLLM](prompt)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
response = a.func[:text2textInstructLLM](prompt; senderId=a.id)
response = GeneralUtils.deFormatLLMtext(response, "qwen3")
println("\ndetectWineryName() ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
pprintln(response)

View File

@@ -300,9 +300,10 @@ function checkinventory(a::T1, input::T2
println("\ncheckinventory input: $inventoryquery ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
# add suppport for similarSQLVectorDB
textresult, rawresponse = SQLLLM.query(inventoryquery, a.func[:executeSQL],
a.func[:text2textInstructLLM],
a.func[:text2textInstructLLM];
insertSQLVectorDB=a.func[:insertSQLVectorDB],
similarSQLVectorDB=a.func[:similarSQLVectorDB])
similarSQLVectorDB=a.func[:similarSQLVectorDB],
llmFormatName="qwen3")
println("\ncheckinventory result ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
println(textresult)
@@ -330,7 +331,8 @@ julia>
# Signature
"""
function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<:AbstractString}
function extractWineAttributes_1(a::T1, input::T2; maxattempt=10
)::String where {T1<:agent, T2<:AbstractString}
systemmsg =
"""
@@ -386,11 +388,16 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
dictkey = ["thought", "wine_name", "winery", "vintage", "region", "country", "wine_type", "grape_varietal", "tasting_notes", "wine_price", "occasion", "food_to_be_paired_with_wine"]
errornote = "N/A"
for attempt in 1:10
llmkwargs=Dict(
:num_ctx => 32768,
:temperature => 0.5,
)
for attempt in 1:maxattempt
#[PENDING] I should add generatequestion()
if attempt > 1
println("\nYiemAgent extractWineAttributes_1() attempt $attempt/10 ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
println("\nYiemAgent extractWineAttributes_1() attempt $attempt/$maxattempt ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
end
usermsg =
@@ -407,7 +414,8 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
response = a.func[:text2textInstructLLM](prompt)
response = a.func[:text2textInstructLLM](prompt;
modelsize="medium", llmkwargs=llmkwargs, senderId=a.id)
response = GeneralUtils.remove_french_accents(response)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
@@ -423,14 +431,14 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
end
checkFlag == true ? continue : nothing
# check whether response has all header
# check whether response has all answer's key points
detected_kw = GeneralUtils.detect_keyword(header, response)
if 0 ∈ values(detected_kw)
errornote = "In your previous attempts, the response does not have all header"
errornote = "In your previous attempts, the response does not have all answer's key points"
println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
elseif sum(values(detected_kw)) > length(header)
errornote = "In your previous attempts, the response has duplicated header"
errornote = "In your previous attempts, the response has duplicated answer's key points"
println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
@@ -571,14 +579,12 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
The preference form requires the following information:
sweetness, acidity, tannin, intensity
<You must follow the following guidelines>
You must follow the following guidelines:
1) If specific information required in the preference form is not available in the query or there isn't any, mark with 'NA' to indicate this.
Additionally, words like 'any' or 'unlimited' mean no information is available.
2) Use the conversion table to convert the descriptive word level of sweetness, intensity, tannin, and acidity into a corresponding integer.
3) Do not generate other comments.
</You must follow the following guidelines>
<You should then respond to the user with>
You should then respond to the user with:
Sweetness_keyword: The exact keywords in the user's query describing the sweetness level of the wine.
Sweetness: ( S ), where ( S ) represents integers indicating the range of sweetness levels. Example: 1-2
Acidity_keyword: The exact keywords in the user's query describing the acidity level of the wine.
@@ -587,9 +593,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
Tannin: ( T ), where ( T ) represents integers indicating the range of tannin level. Example: 1-3
Intensity_keyword: The exact keywords in the user's query describing the intensity level of the wine.
Intensity: ( I ), where ( I ) represents integers indicating the range of intensity level. Example: 2-4
</You should then respond to the user with>
<You should only respond in format as described below>
You should only respond in format as described below:
Sweetness_keyword: ...
Sweetness: ...
Acidity_keyword: ...
@@ -598,9 +602,8 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
Tannin: ...
Intensity_keyword: ...
Intensity: ...
</You should only respond in format as described below>
<Here are some examples>
Here are some examples:
User's query: I want a wine with a medium-bodied, low acidity, medium tannin.
Sweetness_keyword: NA
Sweetness: NA
@@ -620,7 +623,6 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
Tannin: NA
Intensity_keyword: NA
Intensity: NA
</Here are some examples>
Let's begin!
"""
@@ -648,14 +650,14 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
response = a.func[:text2textInstructLLM](prompt)
response = GeneralUtils.deFormatLLMtext(response, "granite3")
# check whether response has all header
# check whether response has all answer's key points
detected_kw = GeneralUtils.detect_keyword(header, response)
if 0 ∈ values(detected_kw)
errornote = "In your previous attempt does not have all header"
errornote = "In your previous attempt does not have all answer's key points"
println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
elseif sum(values(detected_kw)) > length(header)
errornote = "In your previous attempt has duplicated header"
errornote = "In your previous attempt has duplicated answer's key points"
println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue
end
@@ -787,13 +789,13 @@ function paraphrase(text2textInstructLLM::Function, text::String)
response = replace(response, '`' => "")
response = GeneralUtils.remove_french_accents(response)
# check whether response has all header
# check whether response has all answer's key points
detected_kw = GeneralUtils.detect_keyword(header, response)
if 0 ∈ values(detected_kw)
errornote = "\nYiemAgent paraphrase() response does not have all header"
errornote = "\nYiemAgent paraphrase() response does not have all answer's key points"
continue
elseif sum(values(detected_kw)) > length(header)
errornote = "\nnYiemAgent paraphrase() response has duplicated header"
errornote = "\nnYiemAgent paraphrase() response has duplicated answer's key points"
continue
end

View File

@@ -161,6 +161,7 @@ mutable struct sommelier <: agent
chathistory::Vector{Dict{Symbol, Any}}
memory::Dict{Symbol, Any}
func # NamedTuple of functions
llmFormatName::String
end
function sommelier(
@@ -171,6 +172,7 @@ function sommelier(
retailername::String= "retailer_name",
maxHistoryMsg::Integer= 20,
chathistory::Vector{Dict{Symbol, String}} = Vector{Dict{Symbol, String}}(),
llmFormatName::String= "granite3"
)
tools = Dict( # update input format
@@ -213,7 +215,8 @@ function sommelier(
maxHistoryMsg,
chathistory,
memory,
func
func,
llmFormatName
)
return newAgent