module interface
export addNewMessage, conversation, decisionMaker, evaluator, reflector, generatechat,
generalconversation, detectWineryName, generateSituationReport
using JSON3, DataStructures, Dates, UUIDs, HTTP, Random, PrettyPrinting, Serialization,
DataFrames
using GeneralUtils
using ..type, ..util, ..llmfunction
# ------------------------------------------------------------------------------------------------ #
# pythoncall setting #
# ------------------------------------------------------------------------------------------------ #
# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252
# by setting the following variables, PythonCall.jl will use:
# 1. system's python and packages installed by system (via apt install)
# or 2. conda python and packages installed by conda
# if these settings are not set (commented out), PythonCall will use its own python and the
# packages installed by CondaPkg.jl (from env_preparation.jl)
# ENV["JULIA_CONDAPKG_BACKEND"] = "Null" # set condapkg backend = none
# systemPython = split(read(`which python`, String), "\n")[1] # system's python path
# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with $> which python ex. raw"/root/conda/bin/python"
# using PythonCall
# const py_agents = PythonCall.pynew()
# const py_llms = PythonCall.pynew()
# function __init__()
# # PythonCall.pycopy!(py_cv2, pyimport("cv2"))
# # equivalent to from urllib.request import urlopen in python
# PythonCall.pycopy!(py_agents, pyimport("langchain.agents"))
# PythonCall.pycopy!(py_llms, pyimport("langchain.llms"))
# end
# ---------------------------------------------- 100 --------------------------------------------- #
# Parse a function definition supplied as a string and immediately invoke it with `args...`.
# NOTE(review): `eval` here runs at macro-expansion time in this module's scope, and `args`
# are the unevaluated expressions passed to the macro, not runtime values — confirm this
# matches caller expectations before reusing.
macro executeStringFunction(functionStr, args...)
    # Parse the function string into an expression
    func_expr = Meta.parse(functionStr)
    # Create a new function with the parsed expression
    # NOTE(review): `Expr(:call, func_expr, args...)` uses the entire parsed expression as
    # the callee, which only forms a valid signature when `functionStr` parses to a form
    # whose head can serve as a call target — TODO confirm with a concrete caller.
    function_to_call = eval(Expr(:function,
        Expr(:call, func_expr, args...), func_expr.args[2:end]...))
    # Call the newly created function with the provided arguments
    function_to_call(args...)
end
""" Think and choose action
# Arguments
- `a::T`
one of the agents (any subtype of `agent`)
# Keywords
- `recent::Integer=5`
how many of the most recent events from `a.memory[:events]` to include in the prompt
# Return
- `thoughtDict::Dict`
# Example
```jldoctest
julia> config = Dict(
:mqttServerInfo => Dict(
:description => "mqtt server info",
:port => 1883,
:broker => "mqtt.yiem.cc"
),
:externalservice => Dict(
:text2textinstruct => Dict(
:mqtttopic => "/loadbalancer/requestingservice",
:description => "text to text service with instruct LLM",
:llminfo => Dict(
:name => "llama3instruct"
)
),
)
)
julia> output_thoughtDict = Dict(
:thought_1 => "The customer wants to buy a bottle of wine. This is a good start!",
:action_1 => Dict{Symbol, Any}(
:action=>"CHATBOX",
:input=>"What occasion are you buying the wine for?"
),
:observation_1 => ""
)
```
# TODO
- [] update docstring
- [x] implement the function
- [] implement RAG to pull similar experience
- [] use customerinfo
- [] user storeinfo
# Signature
"""
function decisionMaker(a::T; recent::Integer=5)::Dict{Symbol,Any} where {T<:agent}
    # Commented-out "lessons learned" prompt augmentation, kept for future reference.
    # lessonDict = copy(JSON3.read("lesson.json"))
    # lesson =
    # if isempty(lessonDict)
    # ""
    # else
    # lessons = Dict{Symbol, Any}()
    # for (k, v) in lessonDict
    # lessons[k] = lessonDict[k][:lesson]
    # end
    # """
    # You have attempted to help the user before and failed, either because your reasoning for the
    # recommendation was incorrect or your response did not exactly match the user expectation.
    # The following lesson(s) give a plan to avoid failing to help the user in the same way you
    # did previously. Use them to improve your strategy to help the user.
    # Here are some lessons in JSON format:
    # $(JSON3.write(lessons))
    # When providing the thought and action for the current trial, that into account these failed
    # trajectories and make sure not to repeat the same mistakes and incorrect answers.
    # """
    # end
    # Select the window of recent events to summarize into the prompt.
    totalevents = length(a.memory[:events])
    ind =
        if totalevents > recent
            start = totalevents - recent
            # NOTE(review): `start:totalevents` spans `recent + 1` events — off-by-one if
            # exactly `recent` events were intended (use `start + 1` in that case).
            start:totalevents
        else
            1:totalevents
        end
    # Render the selected events as a numbered transcript; events with an outcome append it.
    recentevents = ""
    for (i, event) in enumerate(a.memory[:events][ind])
        if event[:outcome] === nothing
            recentevents *= "$i) $(event[:subject])> $(event[:actioninput])\n"
        else
            recentevents *= "$i) $(event[:subject])> $(event[:actioninput]) $(event[:outcome])\n"
        end
    end
    #[TESTING] recap as caching
    # query similar result from vectorDB
    recapkeys = keys(a.memory[:recap])
    _recapkeys_vec = [i for i in recapkeys]
    # select recent keys (at most the last three recap entries)
    _recentRecapKeys =
        if length(a.memory[:recap]) <= 3 # 1st message is a user's hello msg
            _recapkeys_vec
        elseif length(a.memory[:recap]) > 3
            l = length(a.memory[:recap])
            _recapkeys_vec[l-2:l]
        end
    # get recent recap, preserving the recap's iteration order
    _recentrecap = OrderedDict()
    for (k, v) in a.memory[:recap]
        if k ∈ _recentRecapKeys
            _recentrecap[k] = v
        end
    end
    recentrecap = GeneralUtils.dictToString_noKey(_recentrecap)
    # Cache hit: reuse a previously made decision for a similar situation, if one exists.
    similarDecision = a.func[:similarSommelierDecision](recentrecap)
    if similarDecision !== nothing
        responsedict = similarDecision
        return responsedict
    else
        # System prompt defining the sommelier persona, guidelines, and the
        # Understanding/Reasoning/Plan/Action response format the LLM must follow.
        # NOTE(review): the guideline line "- Only recommend" inside the prompt appears
        # truncated — confirm the intended wording.
        systemmsg =
            """
Your name is $(a.name). You are a helpful English-speaking assistant, acting as a polite, website-based sommelier for $(a.retailername)'s wine store.
Your goal includes:
1) Establish a connection with the customer by greeting them warmly
2) Help them select the best wines only from your store's inventory that align with their preferences
Your responsibility includes:
1) Make an informed decision about what you need to do to achieve the goal
2) Thanks the user when they don't need any further assistance and invite them to comeback next time
Your responsibility excludes:
1) Asking or guiding the user to make an order or purchase
2) Processing sales orders or engaging in any other sales-related activities
3) Answering questions beyond just recommendations.
4) Offering additional services beyond just recommendations.
At each round of conversation, you will be given the current situation:
Your recent events: latest 5 events of the situation
Your Q&A: the question and answer you have asked yourself
You must follow the following guidelines:
- Generally speaking, your inventory has some wines from France, the United States, Australia, Spain, and Italy, but you won't know exactly until you check your inventory.
- All wines in your inventory are always in stock.
- Engage in conversation to indirectly investigate the customer's intention, budget and preferences before checking your inventory.
- Do not ask the user about wine's flavor e.g. floral, citrusy, nutty or some thing similar as these terms cannot be used to search the database.
- Once the user has selected their wine, ask the user if they need any further assistance. Do not offer any additional services. If the user doesn't need any further assistance, say goodbye and invite them to come back next time.
- Medium and full-bodied red wines should not be paired with spicy foods.
You should follow the following guidelines:
- When searching an inventory, search as broadly as possible based on the information you have gathered so far.
- Encourage the customer to explore different options and try new things.
- Sometimes, the item a user desires might not be available in your inventory. In such cases, inform the user that the item is unavailable and suggest an alternative instead.
- If a customer requests information about discounts, quantity, rewards programs, promotions, delivery options, boxes, gift wrapping, packaging, or personalized messages, please inform them that they can contact our sales team at the store.
- Only recommend
For your information:
- vintage 0 means non-vintage.
You should then respond to the user with interleaving Understanding, Reasoning, Plan, Action:
1) Understanding:
- State your understanding about the current situation.
2) Reasoning:
- State your step by step reasoning about the current situation.
3) Plan: Based on the current situation, state a complete plan to complete the task. Be specific.
4) Action_name (Must be aligned with your plan): The name of the action. Typically corresponds to the execution of the first step in your plan.
Can be one of the following functions:
- CHATBOX which you can use to talk with the user. The input is your intentions for the dialogue. Be specific.
- CHECKINVENTORY which you can use to check info about wine you want in your inventory. The input is a search term in verbal English.
Good query example: white wine, full-bodied, France, less than 2000 USD.
- ENDCONVERSATION which you can use when you believe the user has concluded their interaction, to properly end the conversation with them. Input is "NA".
5) Action_input: input of the action
You should only respond in format as described below:
Understanding: ...
Reasoning: ...
Plan: ...
Action_name: ...
Action_input: ...
Let's begin!
"""
        chathistory = chatHistoryToText(a.chathistory)
        # Fast path: if a wine name in short-term memory has not yet appeared in the chat
        # history, skip the LLM decision and immediately use PRESENTBOX.
        if length(a.memory[:shortmem][:available_wine]) != 0
            # check if wine name mentioned in recentevents, only check first wine name is enough
            # because agent will recommend every wines it found each time.
            winenames = []
            for wine in a.memory[:shortmem][:available_wine]
                push!(winenames, wine["wine_name"])
            end
            for winename in winenames
                if !occursin(winename, chathistory)
                    println("\n~~~ Yiem decisionMaker() found wines from DB ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
                    d = Dict(
                        :understanding=> "I understand that the customer is looking for a wine that matches their intention and budget.",
                        :reasoning=> "I checked the inventory and found wines that match the customer's criteria. I will present the wines to the customer.",
                        :plan=> "1) Provide detailed introductions of the wines you just found to the customer.
2) Explain how the wine could match the customer's intention and what its effects might mean for the customer's experience.
3) If multiple wines are available, highlight their differences and provide a comprehensive comparison of how each option aligns with the customer's intention and what the potential effects of each option could mean for the customer's experience.
4) Provide your personal recommendation based on your understanding of the customer's preferences.",
                        :action_name=> "PRESENTBOX",
                        :action_input=> "")
                    return d
                end
            end
        end
        # TODO(review): passing only the wine names (instead of whole wine records) into the
        # context may be better.
        context =
            if length(a.memory[:shortmem][:available_wine]) != 0
                winenames = []
                for (i, wine) in enumerate(a.memory[:shortmem][:available_wine])
                    name = "$i) $(wine["wine_name"]) "
                    push!(winenames, name)
                end
                availableWineName = join(winenames, ',')
                "You found information about the following wines in your inventory: $availableWineName"
            else
                ""
            end
        # `errornote` carries validation feedback into the next retry's prompt.
        errornote = ""
        response = nothing # placeholder for show when error msg show up
        # Retry loop: ask the LLM for a decision, validate it, and re-prompt with an error
        # note when the response is malformed. Gives up after 10 attempts.
        for attempt in 1:10
            QandA = generatequestion(a, a.func[:text2textInstructLLM]; recent=3)
            # NOTE(review): the stray `)` after `$QandA` below looks like a typo in the
            # prompt text — confirm before changing, since it affects the runtime prompt.
            usermsg =
                """
$context
Your recent events: $recentevents
Your Q&A: $QandA)
$errornote
"""
            _prompt =
                [
                    Dict(:name => "system", :text => systemmsg),
                    Dict(:name => "user", :text => usermsg)
                ]
            # put in model format
            prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
            prompt *= """
<|start_header_id|>assistant<|end_header_id|>
"""
            response = a.func[:text2textInstructLLM](prompt)
            response = GeneralUtils.remove_french_accents(response)
            response = replace(response, '*'=>"")
            # check if response contain more than one functions from ["CHATBOX", "CHECKINVENTORY", "ENDCONVERSATION"]
            # NOTE(review): local `count` shadows `Base.count` within this loop body.
            count = 0
            for i ∈ ["CHATBOX", "CHECKINVENTORY", "ENDCONVERSATION"]
                if occursin(i, response)
                    count += 1
                end
            end
            if count > 1
                errornote = "You must use only one function"
                println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
                continue
            end
            # Parse the "Understanding/Reasoning/Plan/Action_name/Action_input" sections
            # into a Symbol-keyed Dict.
            responsedict = GeneralUtils.textToDict(response,
                ["Understanding", "Reasoning", "Plan", "Action_name", "Action_input"],
                rightmarker=":", symbolkey=true, lowercasekey=true)
            if responsedict[:action_name] ∉ ["CHATBOX", "CHECKINVENTORY", "ENDCONVERSATION"]
                errornote = "You must use the given functions"
                println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
                continue
            end
            # Reject responses with empty required sections.
            checkFlag = false
            for i ∈ [:understanding, :plan, :action_name]
                if length(responsedict[i]) == 0
                    # NOTE(review): `error(...)` throws immediately, so the retry code below
                    # it (errornote/println/checkFlag) is unreachable — confirm whether the
                    # intent was to retry instead of abort.
                    error("$i is empty ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
                    errornote = "$i is empty"
                    println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
                    checkFlag = true
                    break
                end
            end
            checkFlag == true ? continue : nothing
            # check if there are more than 1 key per categories
            checkFlag = false
            for i ∈ [:understanding, :plan, :action_name, :action_input]
                matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
                if length(matchkeys) > 1
                    errornote = "DecisionMaker has more than one key per categories"
                    println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
                    checkFlag = true
                    break
                end
            end
            checkFlag == true ? continue : nothing
            println("\n~~~ Yiem decisionMaker() ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
            pprintln(Dict(responsedict))
            # check whether an agent recommend wines before checking inventory or recommend wines
            # outside its inventory
            # ask LLM whether there are any winery mentioned in the response
            mentioned_winery = detectWineryName(a, response)
            if mentioned_winery != "None"
                mentioned_winery = String.(strip.(split(mentioned_winery, ",")))
                # check whether the wine is in event
                isWineInEvent = false
                for winename in mentioned_winery
                    for event in a.memory[:events]
                        if event[:outcome] !== nothing && occursin(winename, event[:outcome])
                            isWineInEvent = true
                            break
                        end
                    end
                end
                # if wine is mentioned but not in timeline or shortmem,
                # then the agent is not supposed to recommend the wine
                if responsedict[:action_name] == "CHATBOX" &&
                   isWineInEvent == false
                    errornote = "Note: Before recommending a wine, ensure it's in your inventory. Check your stock first."
                    println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
                    continue
                end
            end
            # NOTE(review): `:mentioned_winery` is never a key of `responsedict` (keys come
            # from textToDict above), so this delete! appears to be a no-op — confirm.
            delete!(responsedict, :mentioned_winery)
            return responsedict
        end
        error("DecisionMaker failed to generate a thought ", response)
    end
end
""" Assign a scalar value to each new child node, used for selection and backpropagation.
This value quantifies the agent's progress toward task completion, serving as a heuristic to
steer the search algorithm toward the most promising regions of the tree.
# Arguments
- `a::T1`
one of Yiem's agent
- `state::T2`
a game state
# Return
- `evaluation::Tuple{String, Integer}`
evaluation and score
# Example
```jldoctest
julia>
```
# Signature
"""
function evaluator(config::T1, state::T2
)::Tuple{String,Integer} where {T1<:AbstractDict,T2<:AbstractDict}
    # System prompt: teaches the LLM to judge a trajectory and reply in JSON with an
    # "evaluation" string and an integer "score" in [0, 10].
    systemmsg =
        """
Analyze the trajectories of a solution to a question answering task. The trajectories are
labeled by environmental observations about the situation, thoughts that can reason about
the current situation and actions that can be three types:
1) CHECKINVENTORY[query], which you can use to find wine in your inventory.
2) CHATBOX[text], which you can use to interact with the user.
Given a question and a trajectory, evaluate its correctness and provide your reasoning and
analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
can be correct if the thoughts and actions so far are correct, even if the answer is not found
yet. Do not generate additional thoughts or actions. Then ending with the correctness score s
where s is an integer from 0 to 10.
You should only respond in JSON format as describe below:
{"evaluation": "your evaluation", "score": "your evaluation score"}
Here are some examples:
user:
{
"question": "I'm looking for a sedan with an automatic driving feature.",
"thought_1": "I have many types of sedans in my inventory, each with diverse features.",
"thought_2": "But there is only 1 model that has the feature customer wanted.",
"thought_3": "I should check our inventory first to see if we have it.",
"action_1": {"name": "inventory", "input": "Yiem model A"},
"observation_1": "Yiem model A is in stock."
}
assistant
{
"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
It is also better to have simple searches corresponding to a single entity, making this the best action.",
"score": 10
}
user:
{
"question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
"thought_1": "Let me check our inventory first to see if I have it.",
"action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
"observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
"thought_2": "Ok, I have what the user is asking. Let's tell the user.",
"action_2": {"name": "CHATBOX", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
"observation_1": "This is not what I wanted."
}
assistant:
{
"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
not a pen and a pencil seperately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
"score": 0
}
Let's begin!
"""
    # The user message is the serialized trajectory under evaluation.
    usermsg = """
$(JSON3.write(state[:thoughtHistory]))
"""
    chathistory =
        [
            Dict(:name => "system", :text => systemmsg),
            Dict(:name => "user", :text => usermsg)
        ]
    # put in model format
    # NOTE(review): called without the `GeneralUtils.` prefix and with a positional format
    # name, unlike `GeneralUtils.formatLLMtext(_prompt; formatname=...)` used elsewhere in
    # this module — confirm both call forms exist.
    prompt = formatLLMtext(chathistory, "llama3instruct")
    # Open the assistant turn and pre-seed "{" to nudge the model into JSON output.
    prompt *= """
<|start_header_id|>assistant<|end_header_id|>
{
"""
    pprint(prompt)
    # apply LLM specific instruct format
    # FIX: the original assigned `externalService` twice back to back; the duplicate
    # assignment was removed.
    externalService = config[:externalservice][:text2textinstruct]
    msgMeta = GeneralUtils.generate_msgMeta(
        externalService[:mqtttopic],
        senderName="evaluator",
        senderId=string(uuid4()),
        receiverName="text2textinstruct",
        mqttBroker=config[:mqttServerInfo][:broker],
        mqttBrokerPort=config[:mqttServerInfo][:port],
    )
    outgoingMsg = Dict(
        :msgMeta => msgMeta,
        :payload => Dict(
            :text => prompt,
            :kwargs => Dict(
                :max_tokens => 512,
                :stop => ["<|eot_id|>"],
            )
        )
    )
    # Query the LLM over MQTT; retry up to 5 times on transport errors or malformed JSON.
    for attempt in 1:5
        try
            response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
            _responseJsonStr = response[:response][:text]
            expectedJsonExample = """
Here is an expected JSON format:
{"evaluation": "...", "score": "..."}
"""
            responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
            evaluationDict = copy(JSON3.read(responseJsonStr))
            # Typed assignments act as cheap schema checks: they throw (triggering a retry)
            # when a required key is missing or has the wrong type.
            dummya::AbstractString = evaluationDict[:evaluation]
            dummyb::Integer = evaluationDict[:score]
            return (evaluationDict[:evaluation], evaluationDict[:score])
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
        end
    end
    error("evaluator failed to generate an evaluation")
end
"""
# Arguments
# Return
# Example
```jldoctest
julia>
```
# TODO
- [] update docstring
- [x] implement the function
- [x] add try block. check result that it is expected before returning
# Signature
"""
function reflector(config::T1, state::T2)::String where {T1<:AbstractDict,T2<:AbstractDict}
    # https://github.com/andyz245/LanguageAgentTreeSearch/blob/main/hotpot/hotpot.py
    # Prompt asking the LLM to diagnose a failed trajectory and propose a new high-level
    # plan, replying in JSON with a single "reflection" key.
    _prompt =
        """
You are a helpful sommelier working for a wine store.
Your goal is to recommend the best wine from your inventory that match the user preferences.
You will be given a question and a trajectory of the previous help you've done for a user.
You were unsuccessful in helping the user either because you guessed the wrong answer with Finish[answer], or you didn't know the user enough.
In a few sentences, Diagnose a possible reason for failure and devise a new, concise, high level plan that aims to mitigate the same failure.
Use complete sentences.
You should only respond in JSON format as describe below:
{"reflection": "your relection"}
Here are some examples:
Previous Trial:
{
"question": "Hello, I would like a get a bottle of wine",
"thought_1": "A customer wants to buy a bottle of wine. Before making a recommendation, I need to know more about their preferences.",
"action_1": {"name": "CHATBOX", "input": "What is the occasion for which you're buying this wine?"},
"observation_1": "We are holding a wedding party",
"thought_2": "A wedding party, that's a great occasion! The customer might be looking for a celebratory drink. Let me ask some more questions to narrow down the options.",
"action_2": {"name": "CHATBOX", "input": "What type of food will you be serving at the wedding?"},
"observation_2": "It will be Thai dishes.",
"thought_3": "With Thai food, I should recommend a wine that complements its spicy and savory flavors. And since it's a celebratory occasion, the customer might prefer a full-bodied wine.",
"action_3": {"name": "CHATBOX", "input": "What is your budget for this bottle of wine?"},
"observation_3": "I would spend up to 50 bucks.",
"thought_4": "Now that I have some more information, it's time to narrow down the options.",
"action_4": {"name": "winestock", "input": "red wine with full body, pairs well with spicy food, budget \$50"},
"observation_4": "I found the following wines in our stock: \n{\n 1: El Enemigo Cabernet Franc 2019\n2: Tantara Chardonnay 2017\n\n}\n",
"thought_5": "Now that I have a list of potential wines, I need to know more about the customer's taste preferences.",
"action_5": {"name": "CHATBOX", "input": "What type of wine characteristics are you looking for? (e.g. tannin level, sweetness, intensity, acidity)"},
"observation_5": "I like full-bodied red wine with low tannin.",
"thought_6": "Now that I have more information about the customer's preferences, it's time to make a recommendation.",
"action_6": {"name": "recommendbox", "input": "El Enemigo Cabernet Franc 2019"},
"observation_6": "I don't like the one you recommend. I want dry wine."
}
{
"reflection": "I asked the user about the occasion, food type, and budget, and then searched for wine in the inventory right away. However, I should have asked the user for the specific wine type and their preferences in order to gather more information before making a recommendation."
}
Let's begin!
Previous trial:
$(JSON3.write(state[:thoughtHistory]))
{"reflection"
"""
    # apply LLM specific instruct format
    externalService = config[:externalservice][:text2textinstruct]
    llminfo = externalService[:llminfo]
    prompt =
        if llminfo[:name] == "llama3instruct"
            formatLLMtext_llama3instruct("system", _prompt)
        else
            # FIX: corrected misspelled error message ("defied" -> "defined").
            error("llm model name is not defined yet $(@__LINE__)")
        end
    # FIX: the original read `a.config[:externalservice][:text2textinstruct][:mqtttopic]`,
    # but no `a` is in scope here (parameters are `config` and `state`), which raised
    # UndefVarError at runtime. Use the already-extracted `externalService` instead,
    # mirroring `evaluator`.
    msgMeta = GeneralUtils.generate_msgMeta(
        externalService[:mqtttopic],
        senderName="reflector",
        senderId=string(uuid4()),
        receiverName="text2textinstruct",
        mqttBroker=config[:mqttServerInfo][:broker],
        mqttBrokerPort=config[:mqttServerInfo][:port],
    )
    outgoingMsg = Dict(
        :msgMeta => msgMeta,
        :payload => Dict(
            :text => prompt,
            :kwargs => Dict(
                :max_tokens => 512,
                :stop => ["<|eot_id|>"],
            )
        )
    )
    # Query the LLM over MQTT; retry up to 5 times on transport errors or malformed JSON.
    for attempt in 1:5
        try
            response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
            _responseJsonStr = response[:response][:text]
            expectedJsonExample = """
Here is an expected JSON format:
{"reflection": "..."}
"""
            responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
            reflectionDict = copy(JSON3.read(responseJsonStr))
            # Typed assignment acts as a cheap schema check: it throws (triggering a retry)
            # when the "reflection" key is missing or is not a string.
            dummya::AbstractString = reflectionDict[:reflection]
            return reflectionDict[:reflection]
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
        end
    end
    error("reflector failed to generate a thought")
end
""" Chat with llm.
# Arguments
`a::agent`
an agent
# Return
None
# Example
```jldoctest
julia> using JSON3, UUIDs, Dates, FileIO, MQTTClient, ChatAgent
julia> const mqttBroker = "mqtt.yiem.cc"
julia> mqttclient, connection = MakeConnection(mqttBroker, 1883)
julia> tools=Dict( # update input format
"askbox"=>Dict(
:description => "Useful for when you need to ask the user for more context. Do not ask the user their own question.",
:input => "Input is a text in JSON format.{\"Q1\": \"How are you doing?\", \"Q2\": \"How may I help you?\"}",
:output => "" ,
:func => nothing,
),
)
julia> msgMeta = Dict(
:msgPurpose=> "updateStatus",
:from=> "agent",
:to=> "llmAI",
:requestresponse=> "request",
:sendto=> "", # destination topic
:replyTo=> "agent/api/v0.1.0/txt/response", # the requester asks the responder to send the reply to this topic
:repondToMsgId=> "", # the responder is responding to this msg id
:taskstatus=> "", # "complete", "fail", "waiting" or other status
:timestamp=> Dates.now(),
:msgId=> "$(uuid4())",
)
julia> a = ChatAgent.agentReflex(
"Jene",
mqttclient,
msgMeta,
agentConfigTopic, # I need a function to send msg to config topic to get load balancer
role=:sommelier,
tools=tools
)
julia> newAgent = ChatAgent.agentReact(agent)
julia> response = ChatAgent.conversation(newAgent, "Hi! how are you?")
```
# TODO
- [] update docstring
- [] add recap to initialState for earlier completed question
# Signature
"""
function conversation(a::sommelier, userinput::Dict; maximumMsg=50)
    # Normalize accented characters so downstream string matching behaves consistently.
    userinput[:text] = GeneralUtils.remove_french_accents(userinput[:text])
    if userinput[:text] == "newtopic"
        # Magic command: wipe the chat history and start over.
        clearhistory(a)
        return "Okay. What shall we talk about?"
    else
        # add usermsg to a.chathistory
        addNewMessage(a, "user", userinput[:text]; maximumMsg=maximumMsg)
        # add user activity to events memory
        push!(a.memory[:events],
            eventdict(;
                event_description="the user talks to the assistant.",
                timestamp=Dates.now(),
                subject="user",
                actioninput=userinput[:text],
            )
        )
        # Thinking loop: keep calling think(a) until the chosen action is one that
        # communicates with the user.
        # FIX: removed the original's dead placeholder assignments (`actionname = nothing`,
        # `result = nothing`, and a duplicated `chatresponse = nothing`); the loop assigns
        # them before any use.
        chatresponse = nothing
        while chatresponse === nothing
            actionname, result = think(a)
            if actionname ∈ ["CHATBOX", "PRESENTBOX", "ENDCONVERSATION"]
                chatresponse = result
            end
        end
        addNewMessage(a, "assistant", chatresponse; maximumMsg=maximumMsg)
        return chatresponse
    end
end
function conversation(a::companion, userinput::Dict; maximumMsg=50)
    # Magic command: reset the conversation when the user asks for a fresh topic.
    if userinput[:text] == "newtopic"
        clearhistory(a)
        return "Okay. What shall we talk about?"
    end
    # Record the user's message in the chat history and in the events memory.
    addNewMessage(a, "user", userinput[:text]; maximumMsg=maximumMsg)
    push!(a.memory[:events],
        eventdict(;
            event_description="the user talks to the assistant.",
            timestamp=Dates.now(),
            subject="user",
            actioninput=userinput[:text],
        )
    )
    # Generate the assistant's reply, then record it in both history and events memory.
    reply = generatechat(a)
    addNewMessage(a, "assistant", reply; maximumMsg=maximumMsg)
    push!(a.memory[:events],
        eventdict(;
            event_description="the assistant talks to the user.",
            timestamp=Dates.now(),
            subject="assistant",
            actioninput=reply,
        )
    )
    return reply
end
"""
# Arguments
# Return
# Example
```jldoctest
julia>
```
# TODO
- [] update docstring
# Signature
"""
function think(a::T)::NamedTuple{(:actionname, :result),Tuple{String,String}} where {T<:agent}
    # Refresh the situation recap, then ask the decision maker for the next action.
    a.memory[:recap] = generateSituationReport(a, a.func[:text2textInstructLLM]; skiprecent=0)
    thoughtDict = decisionMaker(a; recent=3)
    actionname = thoughtDict[:action_name]
    actioninput = thoughtDict[:action_input]
    # map action and input() to llm function
    response =
        if actionname == "CHATBOX"
            (result=thoughtDict[:plan], errormsg=nothing, success=true)
        elseif actionname == "CHECKINVENTORY"
            checkinventory(a, actioninput)
        elseif actionname == "PRESENTBOX"
            (result=actioninput, errormsg=nothing, success=true)
        elseif actionname == "ENDCONVERSATION"
            x = "Conclude the conversation, thanks the user then goodbye and inviting them to return next time."
            (result=x, errormsg=nothing, success=true)
        else
            error("undefined LLM function. Requesting $actionname")
        end
    # this section allow LLM functions above to have different return values.
    # NOTE(review): only `result` and `rawresponse` are used below; `select`, `reward`,
    # `isterminal`, `errormsg`, and `success` are currently unused — presumably kept for
    # interface uniformity; confirm before removing.
    result = haskey(response, :result) ? response[:result] : nothing
    rawresponse = haskey(response, :rawresponse) ? response[:rawresponse] : nothing
    select = haskey(response, :select) ? response[:select] : nothing
    reward::Integer = haskey(response, :reward) ? response[:reward] : 0
    isterminal::Bool = haskey(response, :isterminal) ? response[:isterminal] : false
    errormsg::Union{AbstractString,Nothing} = haskey(response, :errormsg) ? response[:errormsg] : nothing
    success::Bool = haskey(response, :success) ? response[:success] : false
    # manage memory (pass msg to generatechat)
    if actionname ∈ ["CHATBOX", "PRESENTBOX", "ENDCONVERSATION"]
        # User-facing action: turn the thought into chat text and log the event.
        chatresponse = generatechat(a, thoughtDict)
        push!(a.memory[:events],
            eventdict(;
                event_description="the assistant talks to the user.",
                timestamp=Dates.now(),
                subject="assistant",
                thought=thoughtDict,
                actionname=actionname,
                actioninput=chatresponse,
            )
            # eventdict(;
            # event_description="the assistant talks to the user.",
            # timestamp=Dates.now(),
            # subject="assistant",
            # actioninput=chatresponse,
            # )
        )
        result = chatresponse
    elseif actionname == "CHECKINVENTORY"
        # Inventory lookup: stash found wines in short-term memory and log the search.
        if rawresponse !== nothing
            vd = GeneralUtils.dfToVectorDict(rawresponse)
            # NOTE(review): both branches end with available_wine == vd (vcat onto an empty
            # vector is just vd), so the `if` makes no behavioral difference as written —
            # possibly the branches were meant the other way around (append when non-empty).
            # Confirm intent before changing.
            if length(a.memory[:shortmem][:available_wine]) == 0
                a.memory[:shortmem][:available_wine] = vcat(a.memory[:shortmem][:available_wine], vd)
            else
                a.memory[:shortmem][:available_wine] = vd
            end
        else
            println("checkinventory return nothing")
        end
        push!(a.memory[:events],
            eventdict(;
                event_description= "the assistant searched the database.",
                timestamp= Dates.now(),
                subject= "assistant",
                thought=thoughtDict,
                actionname=actionname,
                actioninput= "I searched the database with this query: $actioninput",
                outcome= "This is what I've found in the database, $result"
            )
        )
    else
        error("condition is not defined ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
    end
    return (actionname=actionname, result=result)
end
"""
    generatechat(a::sommelier, thoughtDict)

Generate the sommelier agent's next chat message to the user.

Builds a llama3-instruct prompt from the agent's persona, the ongoing chat
history, the inventory context gathered so far, and the supplied thoughts,
then queries the agent's instruct LLM. The completion is sanitized (accents,
asterisks, dollar signs, and backticks removed), parsed for a single `Chat:`
section, and validated: the reply must not leak the `Context:` header and must
not mention a winery that never appeared in a prior event outcome (i.e. no
recommending wines the agent never found in its inventory).

# Arguments
- `a::sommelier`: the agent; provides `name`, `retailername`, `memory`,
  `chathistory`, and `func[:text2textInstructLLM]`.
- `thoughtDict`: dict with `:understanding`, `:reasoning`, and `:plan`
  entries describing the agent's current thoughts.

# Returns
The validated chat text to show the user.

# Throws
An error after 10 failed generation/validation attempts.
"""
function generatechat(a::sommelier, thoughtDict)
    systemmsg =
        """
        Your name is $(a.name). You are a helpful English-speaking assistant, acting as a polite, website-based sommelier for $(a.retailername)'s wine store.
        You are currently talking with the user.
        Your goal includes:
        1) Help the user select the best wines from your inventory that align with the user's preferences.
        Your responsibility includes:
        1) Given the situation, convey your thoughts to the user.
        Your responsibility excludes:
        1) Asking or guiding the user to make a purchase
        2) Processing sales orders or engaging in any other sales-related activities
        3) Answering questions and offering additional services beyond just recommendations, such as delivery, box, gift wrapping, personalized messages. Customers can reach out to our sales at the store.
        At each round of conversation, you will be given the current situation:
        Your ongoing conversation with the user: ...
        Context: ...
        Your thoughts: Your current thoughts in your mind
        You MUST follow the following guidelines:
        - Do not offer additional services you didn't thought.
        You should follow the following guidelines:
        - Focus on the latest conversation.
        - If the user interrupts, prioritize the user
        - Be honest
        - Medium and full-bodied red wines should not be paired with spicy foods.
        You should then respond to the user with:
        1) Chat: Given the situation, How would you respond to the user to express your thoughts honestly and keep the conversation going smoothly?
        You should only respond in format as described below:
        Chat: ...
        Here are some examples of response format:
        Chat: "I see. Let me think about it. I'll get back to you with my recommendation."
        Let's begin!
        """
    # a.memory[:shortmem][:available_wine] is a vector of dictionaries holding
    # wines previously retrieved from the inventory database
    context =
        if length(a.memory[:shortmem][:available_wine]) != 0
            "Wines previously found in your inventory: $(availableWineToText(a.memory[:shortmem][:available_wine]))"
        else
            "N/A"
        end
    chathistory = chatHistoryToText(a.chathistory)
    errornote = ""
    response = nothing # kept in scope so a failed attempt's value is inspectable
    for attempt in 1:10
        # BUGFIX: label was "Contex:"; the system prompt declares "Context: ..."
        # and the leak check below searches for "Context:"
        usermsg = """
        Your ongoing conversation with the user: $chathistory
        Context: $context
        Your thoughts: $(thoughtDict[:understanding]) $(thoughtDict[:reasoning]) $(thoughtDict[:plan])
        $errornote
        """
        _prompt =
            [
                Dict(:name => "system", :text => systemmsg),
                Dict(:name => "user", :text => usermsg)
            ]
        # render the turns in the llama3 instruct template and cue the assistant turn
        prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
        prompt *= """
        <|start_header_id|>assistant<|end_header_id|>
        """
        try
            response = a.func[:text2textInstructLLM](prompt)
            # sometimes the model prefixes like "here's how I would respond: ..."
            if occursin("respond:", response)
                errornote = "You don't need to intro your response"
                error("generatechat() response contain : ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
            end
            response = GeneralUtils.remove_french_accents(response)
            response = replace(response, '*' => "")
            response = replace(response, '$' => "USD")
            response = replace(response, '`' => "")
            # NOTE(review): accents are stripped a second time here; redundant if
            # remove_french_accents is idempotent — kept to preserve behavior
            response = GeneralUtils.remove_french_accents(response)
            responsedict = GeneralUtils.textToDict(response, ["Chat"],
                rightmarker=":", symbolkey=true, lowercasekey=true)
            # the parsed Chat section must not be empty
            for i ∈ [:chat]
                if length(JSON3.write(responsedict[i])) == 0
                    error("$i is empty ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
                end
            end
            # check if there is more than 1 key per category
            for i ∈ [:chat]
                matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
                if length(matchkeys) > 1
                    error("generatechat has more than one key per categories")
                end
            end
            # the "Context:" header from the prompt must not leak into the reply
            if occursin("Context:", responsedict[:chat])
                error("Context: is in text. This is not allowed")
            end
            println("\n~~~ generatechat() ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
            pprintln(Dict(responsedict))
            # guard against recommending wines outside the inventory: ask the LLM
            # which wineries the reply mentions, then require at least one to
            # appear in a prior event outcome (i.e. a database search result)
            mentioned_winery = detectWineryName(a, responsedict[:chat])
            if mentioned_winery != "None"
                mentioned_winery = String.(strip.(split(mentioned_winery, ",")))
                isWineInEvent = false
                for winename in mentioned_winery
                    for event in a.memory[:events]
                        if event[:outcome] !== nothing && occursin(winename, event[:outcome])
                            isWineInEvent = true
                            break
                        end
                    end
                    # a match was found; no need to scan the remaining names
                    isWineInEvent && break
                end
                # wine mentioned but absent from every outcome: the agent is
                # recommending outside its known inventory — reject and retry
                if isWineInEvent == false
                    errornote = "Previously: You recommend a wine that is not in your inventory which is not allowed."
                    error("Previously: You recommend a wine that is not in your inventory which is not allowed.")
                end
            end
            return responsedict[:chat]
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
        end
    end
    error("generatechat failed to generate a response")
end
"""
    generatechat(a::companion)

Generate the companion agent's next chat reply from its ongoing chat history.

Uses `a.systemmsg` as the system prompt when set, otherwise a generic
helpful-assistant prompt. The conversation is rendered into a llama3-instruct
prompt and sent to `a.text2textInstructLLM`; the raw completion is returned
without any parsing or validation.

# Arguments
- `a::companion`: the agent; provides `systemmsg`, `chathistory`, and
  `text2textInstructLLM`.

# Returns
The raw LLM completion, as returned by `a.text2textInstructLLM`.
"""
function generatechat(a::companion)
    defaultmsg =
        """
        You are a helpful assistant.
        You are currently talking with the user.
        Your goal includes:
        1) Help the user as best as you can
        At each round of conversation, you will be given the following information:
        Your ongoing conversation with the user: ...
        You should then respond to the user with:
        1) chat: Given the information, what would you say to the user?
        You should only respond in JSON format as described below:
        {"chat": ...}
        Let's begin!
        """
    # fall back to the generic prompt only when no custom persona is configured
    systemmsg = a.systemmsg === nothing ? defaultmsg : a.systemmsg
    chathistory = chatHistoryToText(a.chathistory)
    usermsg = """
    Your ongoing conversation with the user: $chathistory
    """
    _prompt =
        [
            Dict(:name => "system", :text => systemmsg),
            Dict(:name => "user", :text => usermsg)
        ]
    # render in the llama3 instruct template and cue the assistant turn
    prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
    prompt *= """
    <|start_header_id|>assistant<|end_header_id|>
    """
    # NOTE: the former retry loop always returned on its first iteration
    # (no try/catch), so a single call preserves behavior; LLM errors
    # propagate to the caller as before
    return a.text2textInstructLLM(prompt)
end
"""
    generatequestion(a, text2textInstructLLM::Function; recent=nothing)::String

Have the sommelier agent interrogate itself about the current situation.

Builds a self-questioning prompt from a recap of older events plus a verbatim
timeline of the most recent events, queries the LLM, and returns the model's
question/answer block starting at `"Q1: "`. Retries up to 10 times when the
response is malformed (duplicate `Understanding:` sections are collapsed; too
few questions or a missing `A1:` answer triggers a retry).

# Arguments
- `a`: the agent; provides `name`, `retailername`, and `memory`.
- `text2textInstructLLM::Function`: prompt -> completion LLM callable.

# Keywords
- `recent`: how many of the latest events to show verbatim (older events come
  from the recap). `nothing` (the default) falls back to 5, matching the
  "latest 5 events" wording in the system prompt. (Previously a `nothing`
  default crashed on the `totalevents > recent` comparison.)

# Returns
A `String` of the form `"Q1: ..."`.

# Throws
An error after 10 failed attempts.
"""
function generatequestion(a, text2textInstructLLM::Function; recent=nothing)::String
    # BUGFIX: the old default `nothing` threw a MethodError below; default to
    # the 5 recent events the system prompt itself promises
    recent === nothing && (recent = 5)
    systemmsg =
        """
        Your name is $(a.name). You are a helpful English-speaking, website-based sommelier for $(a.retailername)'s online store.
        Your goal includes:
        1) Help the user select the best wines from your inventory that align with the user's preferences
        2) Thanks the user when they don't need any further assistance and invite them to comeback next time
        Your responsibility includes:
        1) Ask yourself what to do about the current situation
        Your responsibility does not include:
        1) Processing sales orders or engaging in any other sales-related activities.
        2) Answering questions and offering additional services beyond just recommendations.
        At each round of conversation, you will be given the current situation:
        Recap: recap of what has happened so far
        Your recent events: latest 5 events of the situation
        You must follow the following guidelines:
        - Your question should be specific, self-contained and not require any additional context.
        - Once the user has chose their wine, ask the user if they need any further assistance. Do not offer any additional services. If the user doesn't need any further assistance, say goodbye and invite them to come back next time.
        You should follow the following guidelines:
        - Focus on the latest conversation
        - If the user interrupts, prioritize the user
        - If you don't already know, find out the user's budget
        - If you don't already know, find out the type of wine the user is looking for, such as red, white, sparkling, rose, dessert, fortified
        - If you don't already know, find out the occasion for which the user is buying wine
        - If you don't already know, find out the characteristics of wine the user is looking for, such as tannin, sweetness, intensity, acidity
        - If you don't already know, find out what food will be served with wine
        - If you haven't already, introduce the wines you found in the database to the user first
        - Generally speaking, your inventory has some wines from France, the United States, Australia, Spain, and Italy, but you won't know exactly until you check your inventory.
        - All wines in your inventory are always in stock.
        - Engage in conversation to indirectly investigate the customer's intention, budget and preferences before checking your inventory.
        - Do not ask the user about wine's flavor e.g. floral, citrusy, nutty or some thing similar as these terms cannot be used to search the database.
        - Once the user has selected their wine, ask the user if they need any further assistance. Do not offer any additional services. If the user doesn't need any further assistance, say goodbye and invite them to come back next time.
        - Medium and full-bodied red wines should not be paired with spicy foods.
        - If a customer requests information about discounts, quantity, rewards programs, promotions, delivery options, boxes, gift wrapping, packaging, or personalized messages, please inform them that they can contact our sales team at the store.
        You should then respond to the user with:
        1) Understanding:
        - State your understanding about the current situation
        2) Q: Given the situation, "ask yourself" at least five, but no more than ten, questions
        3) A: Given the situation, "answer to yourself" the best you can
        - Do not generate any text after the last answer.
        You must only respond in format as described below:
        Understanding: ...
        Q1: ...
        A1: ...
        Q2: ...
        A2: ...
        Q3: ...
        A3: ...
        ...
        Here are some examples:
        Q: The user is buying for her husband, should I dig in to get more information?
        A: Yes, I should. So that I have better idea about the user's preferences.
        Q: Why the user saying this?
        A: According to the situation, ...
        Q: The user is asking for a cappuccino. Do I have it at my cafe?
        A: No I don't.
        Q: Since I don't have a cappuccino but I have a Late, should I ask if they are okay with that?
        A: Yes, I should.
        Q: Are they allergic to milk?
        A: According to the situation, since they mentioned a cappuccino before, it seems they are not allergic to milk.
        Q: Have I checked the inventory yet?
        A: According to the situation, no. I need more information.
        Q: Should I check the inventory now?
        A: According to the situation, ...
        Q: What do I have in the inventory?
        A: According to the situation, ...
        Q: Which items are within the user price range? And which items are out of the user price rance?
        A: According to the situation, ...
        Q: Do I have them in stock?
        A: According to the situation, ...
        Q: Did I introduce them to the user already?
        A: According to the situation, No.
        Q: Am I certain about the information I'm going to share with the user, or should I verify the information first?
        A: According to the situation, ...
        Let's begin!
        """
    totalevents = length(a.memory[:events])
    # NOTE(review): `start:totalevents` spans recent+1 events, one more than
    # `recent` — kept as-is to preserve existing behavior
    ind =
        if totalevents > recent
            start = totalevents - recent
            start:totalevents
        else
            1:totalevents
        end
    # render the selected events into a numbered timeline
    timeline = ""
    for (i, event) in enumerate(a.memory[:events][ind])
        if event[:outcome] === nothing
            timeline *= "$i) $(event[:subject])> $(event[:actioninput])\n"
        else
            timeline *= "$i) $(event[:subject])> $(event[:actioninput]) $(event[:outcome])\n"
        end
    end
    errornote = ""
    response = nothing # kept in scope so a failed attempt's value is inspectable
    # the recap covers everything older than the `recent` events shown verbatim
    recap =
        if length(a.memory[:recap]) <= recent
            "None"
        else
            recapkeys = keys(a.memory[:recap])
            recapkeys_vec = [i for i in recapkeys]
            recapkeys_vec = recapkeys_vec[1:end-recent]
            tempmem = OrderedDict()
            for (k, v) in a.memory[:recap]
                if k ∈ recapkeys_vec
                    tempmem[k] = v
                end
            end
            GeneralUtils.dictToString(tempmem)
        end
    for attempt in 1:10
        # BUGFIX: the prompt previously contained a stray ")" after $recap
        usermsg =
            """
            Recap: $recap
            Your recent events: $timeline
            $errornote
            """
        _prompt =
            [
                Dict(:name => "system", :text => systemmsg),
                Dict(:name => "user", :text => usermsg)
            ]
        # render in the llama3 instruct template and cue the assistant turn
        prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
        prompt *= """
        <|start_header_id|>assistant<|end_header_id|>
        """
        try
            response = text2textInstructLLM(prompt)
            # sometimes the LLM generates more than one "Understanding:";
            # keep only the text after the first marker
            understanding_number = count("Understanding:", response)
            if understanding_number > 1
                x = split(response, "Understanding:")[2]
                response = "Understanding:" * x
            end
            # NOTE(review): counts every "Q" substring, not question headers,
            # so this can overcount — kept as-is
            q_number = count("Q", response)
            # early in the conversation, accept fewer questions
            q_atleast = length(a.memory[:events]) <= 2 ? 1 : 3
            if q_number < q_atleast
                error("too few questions only $q_number questions are generated ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
            # the first answer must be present as well
            elseif !occursin("A1:", response)
                error("no answer found in the response ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
            end
            responsedict = GeneralUtils.textToDict(response,
                ["Understanding", "Q1"],
                rightmarker=":", symbolkey=true, lowercasekey=true)
            # return only the question block (everything from Q1 onwards)
            response = "Q1: " * responsedict[:q1]
            println("\n~~~ generatequestion ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
            pprintln(response)
            return response
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
        end
    end
    error("generatequestion failed to generate a response ", response)
end
"""
    generateSituationReport(a, text2textInstructLLM::Function; skiprecent::Integer=0)

Summarize the agent's event timeline, one summary per event.

All events except the `skiprecent` most recent ones are rendered into a
timeline and the LLM is asked to produce an `Event_i: ...` summary for each.

# Arguments
- `a`: the agent; provides `memory[:events]`.
- `text2textInstructLLM::Function`: prompt -> completion LLM callable.

# Keywords
- `skiprecent::Integer=0`: number of most-recent events to leave out.

# Returns
An `OrderedDict` mapping `:Event_i` keys to the generated summaries, or
`nothing` when there are no events to summarize
(`length(a.memory[:events]) <= skiprecent`).
"""
function generateSituationReport(a, text2textInstructLLM::Function; skiprecent::Integer=0
)::Union{OrderedDict,Nothing}
    systemmsg =
        """
        You are an assistant being in the given events.
        Your task is to writes a summary for each event seperately into an ongoing, interleaving series.
        At each round of conversation, you will be given the situation:
        Total events: number of events you need to summarize.
        Events timeline: ...
        Context: ...
        You should then respond to the user with:
        event: a detailed summary for each event without exaggerated details.
        You must only respond in format as described below:
        Event_1: ...
        Event_2: ...
        ...
        Here are some examples:
        Event_1: The user ask me about where to buy a toy.
        Event_2: I told the user to go to the store at 2nd floor.
        Event_1: The user greets the assistant by saying 'hello'.
        Event_2: The assistant respond warmly and inquire about how he can assist the user.
        Let's begin!
        """
    # nothing to summarize yet — BUGFIX: the former ::OrderedDict return
    # annotation made this `return nothing` throw a conversion MethodError;
    # the annotation is widened to Union{OrderedDict,Nothing}
    if length(a.memory[:events]) <= skiprecent
        return nothing
    end
    # events to summarize: everything except the `skiprecent` most recent
    events = a.memory[:events][1:end-skiprecent]
    timeline = createTimeline(a.memory[:events]; skiprecent=skiprecent)
    errornote = ""
    response = nothing # kept in scope so a failed attempt's value is inspectable
    for attempt in 1:10
        # NOTE(review): without a try/catch this loop always returns (or
        # throws) on its first iteration; kept for symmetry with the other
        # generators in this module
        usermsg = """
        Total events: $(length(events))
        Events timeline: $timeline
        $errornote
        """
        _prompt =
            [
                Dict(:name => "system", :text => systemmsg),
                Dict(:name => "user", :text => usermsg)
            ]
        # render in the llama3 instruct template and cue the assistant turn
        prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
        prompt *= """
        <|start_header_id|>assistant<|end_header_id|>
        """
        response = text2textInstructLLM(prompt)
        # BUGFIX: one header per summarized event; iterating the full event
        # list here mismatched the "Total events" count when skiprecent > 0
        eventheader = ["Event_$i" for i in eachindex(events)]
        responsedict = GeneralUtils.textToDict(response, eventheader,
            rightmarker=":", symbolkey=true)
        println("\n~~~ generateSituationReport() ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
        pprintln(response)
        return responsedict
    end
    error("generateSituationReport failed to generate a response ", response)
end
"""
    detectWineryName(a, text)

Ask the agent's LLM which winery names, if any, appear in `text`.

The model is prompted to answer with a single `Winery_names:` line whose value
is either a comma-separated list of winery names or the literal string
`"None"`. Up to 10 attempts are made before giving up.

# Arguments
- `a`: the agent; provides `func[:text2textInstructLLM]`.
- `text`: the text to scan for winery names.

# Returns
The raw `Winery_names` value extracted from the model's reply.

# Throws
An error after 10 failed attempts.
"""
function detectWineryName(a, text)
    systemmsg =
        """
        You are a sommelier of a wine store.
        Your task is to identify and list any winery names mentioned in the provided text.
        At each round of conversation, you will be given the situation:
        Text: a text describing the situation.
        Tips:
        - Winery usually contains Château, Chateau, Domaine, Côte, Cotes, St. de, or a combination of these words.
        You should then respond to the user with:
        Winery_names: A list of winery names mentioned in the text or "None" if no winery name is mentioned.
        You must only respond in format as described below:
        Winery_names: ...
        Here are some examples:
        Winery_names: Domaine Courbis, Chateau Lafite Rothschild, Matarromera Domaine Roulot, Château, Cotes
        Let's begin!
        """
    usermsg = """
    Text: $text
    """
    # the prompt does not change between attempts, so build it once up front
    turns =
        [
            Dict(:name => "system", :text => systemmsg),
            Dict(:name => "user", :text => usermsg)
        ]
    # render in the llama3 instruct template and cue the assistant turn
    prompt = GeneralUtils.formatLLMtext(turns; formatname="llama3instruct")
    prompt *= """
    <|start_header_id|>assistant<|end_header_id|>
    """
    reply = nothing # kept in scope so a failed attempt's value is inspectable
    for trial in 1:10
        try
            reply = a.func[:text2textInstructLLM](prompt)
            println("\n~~~ detectWineryName() ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
            pprintln(reply)
            parsed = GeneralUtils.textToDict(reply, ["winery_names"],
                rightmarker=":", symbolkey=true, lowercasekey=true)
            return parsed[:winery_names]
        catch err
            errtext = sprint(showerror, err)
            frames = sprint(show, "text/plain", stacktrace(catch_backtrace()))
            println("\n Attempt $trial. Error occurred: $errtext\n$frames ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
        end
    end
    error("detectWineryName failed to generate a response")
end
end # module interface