This commit is contained in:
narawat lamaiin
2024-07-31 06:52:44 +07:00
parent 792accc619
commit 70e55f0306
5 changed files with 262 additions and 319 deletions

View File

@@ -2,7 +2,7 @@
julia_version = "1.10.4" julia_version = "1.10.4"
manifest_format = "2.0" manifest_format = "2.0"
project_hash = "42fe76ec8191cf95e51733bee474db0f4870d573" project_hash = "03625e2270b5f9b2a2b6b43af674dcefbd8f4f9d"
[[deps.AliasTables]] [[deps.AliasTables]]
deps = ["PtrArrays", "Random"] deps = ["PtrArrays", "Random"]

View File

@@ -11,6 +11,7 @@ GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
HTTP = "cd3eb016-35fb-5094-929b-558a96fad6f3" HTTP = "cd3eb016-35fb-5094-929b-558a96fad6f3"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1" JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
LLMMCTS = "d76c5a4d-449e-4835-8cc4-dd86ec44f241" LLMMCTS = "d76c5a4d-449e-4835-8cc4-dd86ec44f241"
LibPQ = "194296ae-ab2e-5f79-8cd4-7183a0a5a0d1"
MQTTClient = "985f35cc-2c3d-4943-b8c1-f0931d5f0959" MQTTClient = "985f35cc-2c3d-4943-b8c1-f0931d5f0959"
PrettyPrinting = "54e16d92-306c-5ea0-a30b-337be88ac337" PrettyPrinting = "54e16d92-306c-5ea0-a30b-337be88ac337"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d" PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"

View File

@@ -214,20 +214,24 @@ function decisionMaker(a::T)::Dict{Symbol, Any} where {T<:agent}
""" """
You are a helpful sommelier working for a wine store. You are a helpful sommelier working for a wine store.
Your task is to help the user choose the best wine that match the user preferences from your inventory. Your task is to help the user choose the best wine that match the user preferences from your inventory.
You are also eager to improve your helpfulness.
At each round of conversation, the user will give you the current situation: At each round of conversation, the user will give you the current situation:
Context: ... Context: ...
Your earlier conversation with the user: ... Your earlier conversation with the user: ...
You must follow the following DO guidelines: You SHOULD follow the following guidelines:
- If the user interrupts, prioritize the user then get back to the guidelines. - If the user interrupts, prioritize the user then get back to the guidelines.
- Check your inventory before mentioning any specific wine.
- Get to know how much the user willing to spend - Get to know how much the user willing to spend
- Get to know type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified - Get to know type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
- Get to know what occasion the user is buying wine for - Get to know what occasion the user is buying wine for
- Get to know what characteristics of wine the user is looking for e.g. tannin, sweetness, intensity, acidity - Get to know what characteristics of wine the user is looking for e.g. tannin, sweetness, intensity, acidity
- Get to know what food will be served with wine - Get to know what food will be served with wine
- Search for wines that match the user preferences
- Recommend wine to the user
- Ask the customer if there is anything else you could help. If not, finish the conversation.
You MUST follow the following guidelines:
- Do not mentioning any wine until you've check your inventory.
You should then respond to the user with interleaving Thought, Plan, Action and Observation: You should then respond to the user with interleaving Thought, Plan, Action and Observation:
- thought: - thought:
@@ -235,7 +239,7 @@ function decisionMaker(a::T)::Dict{Symbol, Any} where {T<:agent}
- plan: Based on the current situation, state a complete plan to complete the task. Be specific. - plan: Based on the current situation, state a complete plan to complete the task. Be specific.
- action_name (Must be aligned with your plan): Can be one of the following functions: - action_name (Must be aligned with your plan): Can be one of the following functions:
1) CHATBOX[text], which you can use to talk with the user. "text" is in verbal English. 1) CHATBOX[text], which you can use to talk with the user. "text" is in verbal English.
2) CHECKINVENTORY[query], which you can use to find info about wine in your inventory. "query" is a search term in verbal English. 2) CHECKINVENTORY[query], which you can use to check info about wine in your inventory. "query" is a search term in verbal English.
Good query example: black car with a stereo, 200 mile range and an electric motor. Good query example: black car with a stereo, 200 mile range and an electric motor.
Good query example: How many car brand are from Asia? Good query example: How many car brand are from Asia?
- action_input: input to the action - action_input: input to the action
@@ -926,13 +930,16 @@ julia> response = ChatAgent.conversation(newAgent, "Hi! how are you?")
# TODO # TODO
- [] update docstring - [] update docstring
- [x] MCTS() for planning
- [] add recap to initialState for earlier completed question - [] add recap to initialState for earlier completed question
- [WORKING] conversation loop
# Signature # Signature
""" """
function conversation(a::T, userinput::Dict) where {T<:agent} function conversation(a::T, userinput::Dict) where {T<:agent}
# place holder
actionname = nothing
result = nothing
chatresponse = nothing
if userinput[:text] == "newtopic" if userinput[:text] == "newtopic"
clearhistory(a) clearhistory(a)
@@ -941,10 +948,45 @@ function conversation(a::T, userinput::Dict) where {T<:agent}
# add usermsg to a.chathistory # add usermsg to a.chathistory
addNewMessage(a, "user", userinput[:text]) addNewMessage(a, "user", userinput[:text])
think(a) actionname, result = think(a)
# -------- use dummy memory to check generatechat() for halucination (checking inventory) -------- #
mem = deepcopy(a.memory)
if actionname == "CHATBOX"
mem[:chatbox] = result
else
push!(mem[:shortmem], Dict(Symbol(actionname)=> result))
end
# thought will be added to chat model via context # thought will be added to chat model via context
chatresponse = generatechat(a) chatresponse = generatechat(mem, a.chathistory, a.text2textInstructLLM)
# some time LLM said to user that it (checking inventory) but it is not.
# if chatresponse want to check inventory but think() didn't checkinventory then do it
if occursin("(check", chatresponse) && occursin("inventory)", chatresponse) &&
actionname != "checkinventory"
actionname, result = forceInventoryCheck(a)
if actionname == "CHATBOX"
a.memory[:chatbox] = result
else
push!(a.memory[:shortmem], Dict(Symbol(actionname)=> result))
end
# generate chatresponse again because we have force inventory check
chatresponse = generatechat(a.memory, a.chathistory, a.text2textInstructLLM)
else
if actionname == "CHATBOX"
a.memory[:chatbox] = result
else
push!(a.memory[:shortmem], Dict(Symbol(actionname)=> result))
end
# since chatresponse does not hallucinate i.e. no (check inventory), it does not need
# to be regenerated and can be used directly
end
addNewMessage(a, "assistant", chatresponse) addNewMessage(a, "assistant", chatresponse)
return chatresponse return chatresponse
@@ -952,78 +994,6 @@ function conversation(a::T, userinput::Dict) where {T<:agent}
end end
# function conversation(a::T, userinput::Dict) where {T<:agent}
# config = deepcopy(a.config)
# pprint(config)
# if userinput[:text] == "newtopic"
# clearhistory(a)
# return "Okay. What shall we talk about?"
# else
# # add usermsg to a.chathistory
# addNewMessage(a, "user", userinput[:text])
# if isempty(a.plan[:currenttrajectory])
# # initial state
# a.plan[:currenttrajectory] = Dict{Symbol, Any}(
# # deepcopy the info to prevent modifying the info unintentionally during MCTS planning
# :customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
# :storeinfo=> deepcopy(a.keywordinfo[:storeinfo]),
# :userselect=> nothing,
# :reward=> 0,
# :isterminal=> false,
# :evaluation=> nothing,
# :lesson=> nothing,
# :totalTrajectoryReward=> nothing,
# # contain question, thought_1, action_1, observation_1, thought_2, ...
# :thoughtHistory=> OrderedDict{Symbol, Any}(
# #[] :recap=>,
# :question=> userinput[:text],
# ),
# # store conversation for virtual customer because the virtual customer agent is just
# # a function and stateless.
# :virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
# [Dict(:name=> "user", :text=> userinput[:text])]
# ),
# )
# else
# _, a.plan[:currenttrajectory] = makeNewState(a.plan[:currenttrajectory],
# a.plan[:activeplan][:thoughtHistory], userinput[:text], userinput[:select],
# userinput[:reward], userinput[:isterminal])
# end
# end
# while true
# bestNextState, besttrajectory = LLMMCTS.runMCTS(a.plan[:currenttrajectory],
# transition, config, decisionMaker, evaluator, reflector;
# totalsample=2, maxDepth=3, maxiterations=3, explorationweight=1.0)
# a.plan[:activeplan] = bestNextState
# latestActionKey, latestActionIndice =
# GeneralUtils.findHighestIndexKey(bestNextState[:thoughtHistory], "action")
# actionname = bestNextState[:thoughtHistory][latestActionKey][:name]
# actioninput = bestNextState[:thoughtHistory][latestActionKey][:input]
# # transition
# if actionname == "chatbox"
# # add usermsg to a.chathistory
# addNewMessage(a, "assistant", actioninput)
# return actioninput
# elseif actionname == "recommendbox"
# # add usermsg to a.chathistory
# addNewMessage(a, "assistant", actioninput)
# return actioninput
# else
# _, a.plan[:currenttrajectory] = transition(a, a.plan[:currenttrajectory], a.plan[:activeplan])
# end
# end
# end
""" """
@@ -1043,7 +1013,7 @@ julia>
# Signature # Signature
""" """
function think(a::T) where {T<:agent} function think(a::T)::NamedTuple{(:actionname, :result), Tuple{String, String}} where {T<:agent}
thoughtDict = decisionMaker(a) thoughtDict = decisionMaker(a)
actionname = thoughtDict[:action_name] actionname = thoughtDict[:action_name]
actioninput = thoughtDict[:action_input] actioninput = thoughtDict[:action_input]
@@ -1066,11 +1036,133 @@ function think(a::T) where {T<:agent}
errormsg::Union{AbstractString, Nothing} = haskey(response, :errormsg) ? response[:errormsg] : nothing errormsg::Union{AbstractString, Nothing} = haskey(response, :errormsg) ? response[:errormsg] : nothing
success::Bool = haskey(response, :success) ? response[:success] : false success::Bool = haskey(response, :success) ? response[:success] : false
if actionname == "CHATBOX" return (actionname=actionname, result=result)
a.memory[:chatbox] = result end
else
push!(a.memory[:shortmem], Dict(Symbol(actionname)=> result))
""" Force to think and check inventory
[WORKING]
"""
function forceInventoryCheck(a::T)::NamedTuple{(:actionname, :result), Tuple{String, String}} where {T<:agent}
thoughtDict = thinkCheckInventory(a)
actionname = thoughtDict[:action_name]
actioninput = thoughtDict[:action_input]
# map action and input() to llm function
response =
if actionname == "CHECKINVENTORY"
checkinventory(a, actioninput)
else
error("undefined LLM function. Requesting $actionname")
end
# this section allow LLM functions above to have different return values.
result = haskey(response, :result) ? response[:result] : nothing
select = haskey(response, :select) ? response[:select] : nothing
reward::Integer = haskey(response, :reward) ? response[:reward] : 0
isterminal::Bool = haskey(response, :isterminal) ? response[:isterminal] : false
errormsg::Union{AbstractString, Nothing} = haskey(response, :errormsg) ? response[:errormsg] : nothing
success::Bool = haskey(response, :success) ? response[:success] : false
return (actionname=actionname, result=result)
end
"""
[WORKING]
"""
function thinkCheckInventory(a::T)::Dict{Symbol, Any} where {T<:agent}
systemmsg =
"""
You are a helpful sommelier working for a wine store.
Your task is to help the user choose the best wine that match the user preferences from your inventory.
Definitions:
- observation: result of the preceding immediate action.
At each round of conversation, the user will give you the current situation:
Context: ...
Your earlier conversation with the user: ...
You must follow the following guidelines:
- Check inventory immediately based on what you know about the user.
You should then respond to the user with interleaving Thought, Plan, Action and Observation:
- thought:
1) State your reasoning about the current situation.
- plan: Based on the current situation, state a complete plan to complete the task. Be specific.
- action_name (Must be aligned with your plan): Can be one of the following functions:
1) CHECKINVENTORY[query], which you can use to check info about wine in your inventory. "query" is a search term in verbal English.
Good query example: black car with a stereo, 200 mile range and an electric motor.
Good query example: How many car brand are from Asia?
- action_input: input to the action
You should only respond in format as described below:
thought: ...
plan: ...
action_name: ...
action_input: ...
Let's begin!
"""
usermsg =
"""
Context: None
Your earlier conversation with the user: $(vectorOfDictToText(a.chathistory))
"""
_prompt =
[
Dict(:name=> "system", :text=> systemmsg),
Dict(:name=> "user", :text=> usermsg)
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
prompt *=
"""
<|start_header_id|>assistant<|end_header_id|>
"""
response = nothing # store for show when error msg show up
for attempt in 1:10
try
response = a.text2textInstructLLM(prompt)
responsedict = GeneralUtils.textToDict(response,
["thought", "plan", "action_name", "action_input"],
rightmarker=":", symbolkey=true)
if responsedict[:action_name] ∉ ["CHECKINVENTORY"]
error("decisionMaker didn't use the given functions ", @__LINE__)
end
for i ∈ [:thought, :plan, :action_name]
if length(JSON3.write(responsedict[i])) == 0
error("$i is empty ", @__LINE__)
end
end
# check if there are more than 1 key per categories
for i ∈ [:thought, :plan, :action_name, :action_input]
matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
if length(matchkeys) > 1
error("DecisionMaker has more than one key per categories")
end
end
return responsedict
catch e
io = IOBuffer()
showerror(io, e)
errorMsg = String(take!(io))
st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
println("")
println("Attempt $attempt. Error occurred: $errorMsg\n$st")
println("")
end
end end
error("DecisionMaker failed to generate a thought ", response)
end end
@@ -1090,14 +1182,14 @@ julia>
# TODO # TODO
- [] update docs - [] update docs
- [WORKING] implement the function - [x] implement the function
# Signature # Signature
""" """
function generatechat(a::T) where {T<:agent} function generatechat(memory::Dict, chathistory::Vector, text2textInstructLLM::Function)
systemmsg = systemmsg =
""" """
You are a polite sommelier working for a wine store. You are an website-based polite sommelier working for an online wine store.
Your task is to help the user choose the best wine that match the user preferences from your inventory. Your task is to help the user choose the best wine that match the user preferences from your inventory.
At each round of conversation, the user will give you the current situation: At each round of conversation, the user will give you the current situation:
@@ -1105,9 +1197,11 @@ function generatechat(a::T) where {T<:agent}
Your thoughts: Your current thinking in your mind Your thoughts: Your current thinking in your mind
Your earlier conversation with the user: ... Your earlier conversation with the user: ...
You must follow the following DON'T guidelines: You must follow the following guidelines:
- Don't mention any specific wine until you've checked your inventory. - Do not mentioning any wine until you've check your inventory.
- Check your inventory before recommending any specific wine. - Check your inventory before recommending any specific wine.
- Your thoughts matter.
- Do not offer the user to try wine as you are internet-based agent.
You should then respond to the user with: You should then respond to the user with:
- chat: what do you want to say to the user based on the current situation - chat: what do you want to say to the user based on the current situation
@@ -1118,15 +1212,17 @@ function generatechat(a::T) where {T<:agent}
Let's begin! Let's begin!
""" """
context = length(a.memory[:shortmem]) > 0 ? vectorOfDictToText(a.memory[:shortmem], withkey=false) : "None" context = length(memory[:shortmem]) > 0 ? vectorOfDictToText(memory[:shortmem], withkey=false) : "None"
usermsg = usermsg =
""" """
Context: $context Context: $context
Your earlier conversation with the user: $(vectorOfDictToText(a.chathistory)) Your earlier conversation with the user: $(vectorOfDictToText(chathistory))
Your thoughts: $(a.memory[:chatbox]) Your thoughts: $(memory[:chatbox])
""" """
println("")
println("--> think ", @__FILE__, " ", @__LINE__)
println(memory[:chatbox])
_prompt = _prompt =
[ [
Dict(:name=> "system", :text=> systemmsg), Dict(:name=> "system", :text=> systemmsg),
@@ -1142,7 +1238,7 @@ function generatechat(a::T) where {T<:agent}
for attempt in 1:5 for attempt in 1:5
try try
response = a.text2textInstructLLM(prompt) response = text2textInstructLLM(prompt)
responsedict = GeneralUtils.textToDict(response, responsedict = GeneralUtils.textToDict(response,
["chat"], ["chat"],
rightmarker=":", symbolkey=true) rightmarker=":", symbolkey=true)
@@ -1175,13 +1271,6 @@ function generatechat(a::T) where {T<:agent}
end end
end end
error("generatechat failed to generate an evaluation") error("generatechat failed to generate an evaluation")
end end

View File

@@ -9,81 +9,6 @@ using ..type, ..util
# ---------------------------------------------- 100 --------------------------------------------- # # ---------------------------------------------- 100 --------------------------------------------- #
"""
# Arguments
# Return
# Example
```jldoctest
julia>
```
# TODO
- [] update docstring
- [WORKING] implement the function
# Signature
"""
function userChatbox(a::T1, input::T2) where {T1<:agent, T2<:AbstractString}
error("--> userChatbox")
# put in model format
virtualWineCustomer = a.config[:externalservice][:virtualWineCustomer_1]
llminfo = virtualWineCustomer[:llminfo]
formattedinput =
if llminfo[:name] == "llama3instruct"
formatLLMtext_llama3instruct("assistant", input)
else
error("llm model name is not defied yet $(@__LINE__)")
end
# send formatted input to user using GeneralUtils.sendReceiveMqttMsg
# return response
end
"""
# Arguments
# Return
# Example
```jldoctest
julia>
```
# TODO
- [] update docstring
- [PENDING] implement the function
# Signature
"""
function userRecommendbox(a::T1, input::T2) where {T1<:agent, T2<:AbstractString}
error("--> userRecommendbox")
# put in model format
virtualWineCustomer = a.config[:externalservice][:virtualWineCustomer_1]
llminfo = virtualWineCustomer[:llminfo]
formattedinput =
if llminfo[:name] == "llama3instruct"
formatLLMtext_llama3instruct("assistant", input)
else
error("llm model name is not defied yet $(@__LINE__)")
end
# send formatted input to user using GeneralUtils.sendReceiveMqttMsg
# return response
end
""" Chatbox for chatting with virtual wine customer. """ Chatbox for chatting with virtual wine customer.
@@ -363,15 +288,15 @@ julia> result = checkinventory(agent, input)
# Signature # Signature
""" """
function checkinventory(a::T1, input::T2 function checkinventory(a::T1, input::T2
)::Union{Tuple{String, Number, Number, Bool}, Tuple{String, Nothing, Number, Bool}} where {T1<:agent, T2<:AbstractString} )::NamedTuple{(:result, :success, :errormsg), Tuple{String, Bool, Union{String, Nothing}}} where {T1<:agent, T2<:AbstractString}
wineattributes_1 = extractWineAttributes_1(a, input) wineattributes_1 = extractWineAttributes_1(a, input)
wineattributes_2 = extractWineAttributes_2(a, input) wineattributes_2 = extractWineAttributes_2(a, input)
inventoryquery = "$wineattributes_1, $wineattributes_2" inventoryquery = "$wineattributes_1, $wineattributes_2"
println("--> checkinventory: $inventoryquery")
result = SQLLLM.query(inventoryquery, a.executeSQL, a.text2textInstructLLM) result = SQLLLM.query(inventoryquery, a.executeSQL, a.text2textInstructLLM)
return result return (result=result, success=true, errormsg=nothing)
end end
@@ -399,7 +324,7 @@ function extractWineAttributes_1(a::T1, input::T2
systemmsg = systemmsg =
""" """
As an helpful sommelier, your task is to fill the user's preference form based on the user query. As an helpful sommelier, your task is to fill out the user's preference form by copying the corresponding words from the user's query.
At each round of conversation, the user will give you the current situation: At each round of conversation, the user will give you the current situation:
User query: ... User query: ...
@@ -415,14 +340,14 @@ function extractWineAttributes_1(a::T1, input::T2
You should then respond to the user with the following points: You should then respond to the user with the following points:
- wine_type: Can be one of: red, white, sparkling, rose, dessert or fortified - wine_type: Can be one of: red, white, sparkling, rose, dessert or fortified
- price: ... - price: Must be an integer representing the cost of the wine.
- occasion: ... - occasion: ...
- food_to_be_paired_with_wine: food that will be served with wine - food_to_be_paired_with_wine: food that will be served with wine
- country: wine's country of origin - country: wine's country of origin
- grape_variety: ... - grape_variety: ...
- wine_notes: - tasting_notes: dry, full bodied or something similar
Bad example (these words are not wine notes - descriptive words): dry, sour, full bodied, etc. - flavor: what the wine tast like? e.g. floral, citrus, earthy, fruity, tropical, nutty or something similar
Good example (these words are wine notes - descriptive words): floral, citrus, earthy, fruity, tropical, nutty, etc.
You should only respond in the form as described below: You should only respond in the form as described below:
wine_type: ... wine_type: ...
@@ -431,7 +356,8 @@ function extractWineAttributes_1(a::T1, input::T2
food_to_be_paired_with_wine: ... food_to_be_paired_with_wine: ...
country: ... country: ...
grape_variety: ... grape_variety: ...
wine_notes: ... tasting_notes: ...
flavor: ...
Let's begin! Let's begin!
""" """
@@ -454,7 +380,7 @@ function extractWineAttributes_1(a::T1, input::T2
<|start_header_id|>assistant<|end_header_id|> <|start_header_id|>assistant<|end_header_id|>
""" """
attributes = ["wine_type", "price", "occasion", "food_to_be_paired_with_wine", "country", "grape_variety"] attributes = ["wine_type", "price", "occasion", "food_to_be_paired_with_wine", "country", "grape_variety", "tasting_notes", "flavor"]
for attempt in 1:5 for attempt in 1:5
try try
@@ -467,6 +393,10 @@ function extractWineAttributes_1(a::T1, input::T2
end end
end end
#[TESTING] remove tasting_notes because the database didn't prepare to be search using it
responsedict[:tasting_notes] = "NA"
responsedict[:flavor] = "NA"
result = "" result = ""
for (k, v) in responsedict for (k, v) in responsedict
if !occursin("NA", v) if !occursin("NA", v)
@@ -474,6 +404,9 @@ function extractWineAttributes_1(a::T1, input::T2
end end
end end
#[WORKING] remove halucination. "highend dry white wine" --> "wine_type: white, occasion: special occasion, food_to_be_paired_with_wine: seafood, fish, country: France, Italy, USA, grape_variety: Chardonnay, Sauvignon Blanc, Pinot Grigio\nwine_notes: citrus, green apple, floral"
result = result[1:end-2] # remove the ending ", " result = result[1:end-2] # remove the ending ", "
return result return result
@@ -498,29 +431,29 @@ function extractWineAttributes_2(a::T1, input::T2
""" """
Conversion Table: Conversion Table:
Intensity level: Intensity level:
1: May correspond to "light-bodied" or a similar description. 1 to 2: May correspond to "light-bodied" or a similar description.
2: May correspond to "med light", "medium light" or a similar description. 2 to 3: May correspond to "med light bodied", "medium light" or a similar description.
3: May correspond to "medium" or a similar description. 3 to 4: May correspond to "medium bodied" or a similar description.
4: May correspond to "med full", "medium full" or a similar description. 4 to 5: May correspond to "med full bodied", "medium full" or a similar description.
5: May correspond to "full" or a similar description. 4 to 5: May correspond to "full bodied" or a similar description.
Sweetness level: Sweetness level:
1: May correspond to "dry", "no sweet" or a similar description. 1 to 2: May correspond to "dry", "no sweet" or a similar description.
2: May correspond to "off dry", "less sweet" or a similar description. 2 to 3: May correspond to "off dry", "less sweet" or a similar description.
3: May correspond to "semi sweet" or a similar description. 3 to 4: May correspond to "semi sweet" or a similar description.
4: May correspond to "sweet" or a similar description. 4 to 5: May correspond to "sweet" or a similar description.
5: May correspond to "very sweet" or a similar description. 4 to 5: May correspond to "very sweet" or a similar description.
Tannin level: Tannin level:
1: May correspond to "low tannin" or a similar description. 1 to 2: May correspond to "low tannin" or a similar description.
2: May correspond to "semi low tannin" or a similar description. 2 to 3: May correspond to "semi low tannin" or a similar description.
3: May correspond to "medium tannin" or a similar description. 3 to 4: May correspond to "medium tannin" or a similar description.
4: May correspond to "semi high tannin" or a similar description. 4 to 5: May correspond to "semi high tannin" or a similar description.
5: May correspond to "high tannin" or a similar description. 4 to 5: May correspond to "high tannin" or a similar description.
Acidity level: Acidity level:
1: May correspond to "low acidity" or a similar description. 1 to 2: May correspond to "low acidity" or a similar description.
2: May correspond to "semi low acidity" or a similar description. 2 to 3: May correspond to "semi low acidity" or a similar description.
3: May correspond to "medium acidity" or a similar description. 3 to 4: May correspond to "medium acidity" or a similar description.
4: May correspond to "semi high acidity" or a similar description. 4 to 5: May correspond to "semi high acidity" or a similar description.
5: May correspond to "high acidity" or a similar description. 4 to 5: May correspond to "high acidity" or a similar description.
""" """
systemmsg = systemmsg =
@@ -541,10 +474,10 @@ function extractWineAttributes_2(a::T1, input::T2
3) Do not generate other comments. 3) Do not generate other comments.
You should then respond to the user with the following points: You should then respond to the user with the following points:
- sweetness: S where S is an integer indicating sweetness level - sweetness: S where S are integers represent the range of sweetness levels e.g. 1-2
- acidity: D where D is an integer indicating acidity level - acidity: D where D are integers represent the range of acidity level e.g. 3-4
- tannin: T where T is an integer indicating tannin level - tannin: T where T are integers represent the range of tannin level e.g. 4-5
- intensity: I where I is an integer indicating intensity level - intensity: I where I are integers represent the range of intensity level e.g. 3-4
You should only respond in the form as described below: You should only respond in the form as described below:
sweetness: ... sweetness: ...
@@ -587,6 +520,13 @@ function extractWineAttributes_2(a::T1, input::T2
end end
end end
# some time LLM think the user mentioning acidity and tannin but actually didn't
for (k, v) in responsedict
if k ∈ [:acidity, :tannin] && !occursin(string(k), input)
responsedict[k] = "NA"
end
end
result = "" result = ""
for (k, v) in responsedict for (k, v) in responsedict
if !occursin("NA", v) if !occursin("NA", v)
@@ -610,93 +550,6 @@ function extractWineAttributes_2(a::T1, input::T2
error("wineattributes_wordToNumber() failed to get a response") error("wineattributes_wordToNumber() failed to get a response")
end end
# function recheckWineAttributes(a::T1, input::T2
# )::String where {T1<:agent, T2<:AbstractString}
# systemmsg =
# """
# As an helpful sommelier, your task is to check what preferences the user mentioned in the query.
# At each round of conversation, the user will give you the current situation:
# User query: ...
# The preferences are:
# - wine type: red, white, sparkling, rose, dessert, fortified
# - price
# - occasion
# - food to be paired with wine
# - country
# - grape_variety
# - tasting_notes
# - sweetness
# - acidity
# - tannin
# - intensity: wine body e.g. full bodied, light bodied
# You should then respond to the user with the following points:
# 1) mentioned_preferences: list all the preferences in the user's query.
# Good example: mentioned_preferences: "price", "country", "wine type"
# You should only respond in the form as described below:
# mentioned_preferences: ...
# Let's begin!
# """
# usermsg =
# """
# User query: $input
# """
# _prompt =
# [
# Dict(:name=> "system", :text=> systemmsg),
# Dict(:name=> "user", :text=> usermsg)
# ]
# # put in model format
# prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
# prompt *=
# """
# <|start_header_id|>assistant<|end_header_id|>
# """
# attributes = ["mentioned_preferences"]
# for attempt in 1:5
# try
# response = a.text2textInstructLLM(prompt)
# responsedict = GeneralUtils.textToDict(response, attributes, rightmarker=":", symbolkey=true)
# for i ∈ attributes
# if length(JSON3.write(responsedict[Symbol(i)])) == 0
# error("$i is empty ", @__LINE__)
# end
# end
# result = ""
# for (k, v) in responsedict
# if !occursin("NA", v)
# result *= "$k: $v, "
# end
# end
# result = result[1:end-2] # remove the ending ", "
# return result
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("")
# println("Attempt $attempt. Error occurred: $errorMsg\n$st")
# println("")
# end
# end
# error("wineattributes_wordToNumber() failed to get a response")
# end
""" Attemp to correct LLM response's incorrect JSON response. """ Attemp to correct LLM response's incorrect JSON response.

View File

@@ -73,26 +73,26 @@ end
id="testingSessionID", # agent instance id id="testingSessionID", # agent instance id
) )
# function main() function main()
# userinput = "Hello, I would like a get a bottle of wine." userinput = "Hello, I would like a get a bottle of wine."
# for i in 1:10 for i in 1:10
# response = YiemAgent.conversation(a, Dict(:text=> userinput)) response = YiemAgent.conversation(a, Dict(:text=> userinput))
# println("") println("")
# println("--> assistant response: \n", response) println("--> assistant response: \n", response)
# println("") println("")
# println("--> user input:") println("--> user input:")
# userinput = readline() userinput = readline()
# end end
# end end
# main() main()
# """ """
# I'm having a graduation party this evening. I have no budget limit. I'm having a graduation party this evening. I have no budget limit. I want a bottle of dry white wine.
# I have no idea. The party will be formal. What type of wine people usually get for this occasion? I have no idea. The party will be formal. What type of wine people usually get for this occasion?
# What about sparkling Rose? What about sparkling Rose?
# """ """
@@ -119,9 +119,9 @@ end
input = "query=\"medium-bodied dry white wine\"" # input = "query=\"medium-bodied dry white wine\""
# input = "the customer is looking for a medium-bodied, dry white wine." # # input = "the customer is looking for a medium-bodied, dry white wine."
result = YiemAgent.checkinventory(a, input) # result = YiemAgent.checkinventory(a, input)