module interface

export addNewMessage, conversation, decisionMaker, evaluator, reflector
# isterminal,

using JSON3, DataStructures, Dates, UUIDs, HTTP, Random, MQTTClient, PrettyPrinting, Serialization
using GeneralUtils, LLMMCTS
using ..type, ..util, ..llmfunction

# ------------------------------------------------------------------------------------------------ #
# pythoncall setting #
# ------------------------------------------------------------------------------------------------ #
# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252
# By setting the following variables, PythonCall.jl will use:
# 1. the system's python and packages installed by the system (via apt install), or
# 2. conda's python and packages installed by conda.
# If these settings are not set (commented out), PythonCall will use its own python and packages
# installed by CondaPkg.jl (from env_preparation.jl).
# ENV["JULIA_CONDAPKG_BACKEND"] = "Null" # set condapkg backend = none
# systemPython = split(read(`which python`, String), "\n")[1] # system's python path
# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with $> which python ex. raw"/root/conda/bin/python"

# using PythonCall
# const py_agents = PythonCall.pynew()
# const py_llms = PythonCall.pynew()
# function __init__()
#     # PythonCall.pycopy!(py_cv2, pyimport("cv2"))

#     # equivalent to `from urllib.request import urlopen` in python
#     PythonCall.pycopy!(py_agents, pyimport("langchain.agents"))
#     PythonCall.pycopy!(py_llms, pyimport("langchain.llms"))
# end

# ---------------------------------------------- 100 --------------------------------------------- #

macro executeStringFunction(functionStr, args...)
    # Parse the function string into an expression
    func_expr = Meta.parse(functionStr)

    # Create a new function with the parsed expression
    function_to_call = eval(Expr(:function,
        Expr(:call, func_expr, args...), func_expr.args[2:end]...))

    # Call the newly created function with the provided arguments
    function_to_call(args...)
end
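
# Usage note (editor's sketch): the macro parses and `eval`s its first argument at expansion time,
# so `functionStr` must be a literal string holding a complete function definition. A hypothetical
# invocation might look like:
#
#     @executeStringFunction "f(x) = x + 1" 41   # intended to evaluate to 42
#
# This only illustrates the intended use; it is not a tested call site.
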
""" Think and choose action
|
|
|
|
# Arguments
|
|
- `config::T1`
|
|
config
|
|
- `state::T2`
|
|
a game state
|
|
|
|
# Return
|
|
- `thoughtDict::Dict`
|
|
|
|
# Example
|
|
```jldoctest
|
|
julia> config = Dict(
|
|
:mqttServerInfo => Dict(
|
|
:description => "mqtt server info",
|
|
:port => 1883,
|
|
:broker => "mqtt.yiem.cc"
|
|
),
|
|
:externalservice => Dict(
|
|
:text2textinstruct => Dict(
|
|
:mqtttopic => "/loadbalancer/requestingservice",
|
|
:description => "text to text service with instruct LLM",
|
|
:llminfo => Dict(
|
|
:name => "llama3instruct"
|
|
)
|
|
),
|
|
)
|
|
)
|
|
|
|
julia> output_thoughtDict = Dict(
|
|
:thought_1 => "The customer wants to buy a bottle of wine. This is a good start!",
|
|
:action_1 => Dict{Symbol, Any}(
|
|
:action=>"CHATBOX",
|
|
:input=>"What occasion are you buying the wine for?"
|
|
),
|
|
:observation_1 => ""
|
|
)
|
|
```
|
|
|
|
# TODO
|
|
- [] update docstring
|
|
- [x] implement the function
|
|
- [] implement RAG to pull similar experience
|
|
- [] use customerinfo
|
|
- [] user storeinfo
|
|
|
|
# Signature
|
|
"""
|
|
function decisionMaker(a::T)::Dict{Symbol, Any} where {T<:agent}

    # lessonDict = copy(JSON3.read("lesson.json"))

    # lesson =
    #     if isempty(lessonDict)
    #         ""
    #     else
    #         lessons = Dict{Symbol, Any}()
    #         for (k, v) in lessonDict
    #             lessons[k] = lessonDict[k][:lesson]
    #         end

    #         """
    #         You have attempted to help the user before and failed, either because your reasoning for the
    #         recommendation was incorrect or your response did not exactly match the user expectation.
    #         The following lesson(s) give a plan to avoid failing to help the user in the same way you
    #         did previously. Use them to improve your strategy to help the user.

    #         Here are some lessons in JSON format:
    #         $(JSON3.write(lessons))

    #         When providing the thought and action for the current trial, take into account these failed
    #         trajectories and make sure not to repeat the same mistakes and incorrect answers.
    #         """
    #     end

    # _prompt =
    #     """
    #     You are a helpful sommelier working for a wine store.
    #     Your goal is to recommend the best wine from your inventory that matches the user preferences.
    #     You are also keen to improve your recommendation with lesson(s).

    #     You must follow the following criteria:
    #     1) Get to know how much the user is willing to spend
    #     2) Get to know the type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
    #     3) Get to know what occasion the user is buying wine for
    #     4) Get to know what characteristics of wine the user is looking for
    #        e.g. tannin, sweetness, intensity, acidity
    #     5) Get to know what food the user will have with wine
    #     6) Check your inventory for the best wine that matches the user preference
    #     7) Recommend wine to the user

    #     You should only respond with interleaving Thought, Action, Observation steps.
    #     Thought can reason about the current situation, and Action can be two types:
    #     1) winestock[query], which you can use to find wine in your inventory. The more input data the better.
    #     2) CHATBOX[text], which you can use to interact with the user.
    #     After each observation, provide the next Thought and next Action.

    #     You should only respond in JSON format as described below:
    #     {
    #         "thought": "your reasoning",
    #         "action": {"name": "action to take", "input": "action input"},
    #         "observation": "result of the action"
    #     }

    #     Here are some examples:
    #     {
    #         "question": "I would like to buy a sedan with 8 seats.",
    #         "thought_1": "Our showroom carries various vehicle models. But I'm not sure whether we have a model that fits the user demand, I need to check our inventory.",
    #         "action_1": {"name": "inventory", "input": "sedan with 8 seats."},
    #         "observation_1": "Several models have 8 seats. Available colors are black, red, green"
    #     }
    #     {
    #         "thought": "I have a few colors for the user to choose from. I will ask him what color he likes.",
    #         "action": {"name": "CHATBOX", "input": "Which color do you like?"},
    #         "observation": "I'll take black."
    #     }

    #     $lesson

    #     Let's begin!

    #     $(JSON3.write(state[:thoughtHistory]))
    #     {"thought"
    #     """

    # systemmsg =
    #     """
    #     You are a helpful sommelier working for a wine store.
    #     Your task is to help the user choose the best wine that matches the user preferences from your inventory.
    #     You are also eager to improve your helpfulness.

    #     You must follow the following guidelines:
    #     - Get to know how much the user is willing to spend
    #     - Get to know the type of wine the user is looking for e.g. red, white, sparkling, rose, dessert, fortified
    #     - Get to know what occasion the user is buying wine for
    #     - Get to know what characteristics of wine the user is looking for e.g. tannin, sweetness, intensity, acidity
    #     - Get to know what food the user will have with wine

    #     At each round of conversation, the user will give you the current situation:
    #     Context: ...
    #     Your earlier conversation with the user: ...

    #     You should then respond to the user with interleaving Thought, Plan, Action and Observation:
    #     - thought:
    #       1) State your reasoning about the current situation.
    #     - plan: Based on the current situation, what would you do to complete the task? Be specific.
    #     - action (Must be aligned with your plan): Can be one of the following functions:
    #       1) CHATBOX[text], which you can use to talk with the user. "text" is in verbal English.
    #       2) CHECKINVENTORY[query], which you can use to find info about wine in your inventory. "query" is a search term in verbal English.
    #     - observation: result of the action.

    #     You should only respond in the format described below:
    #     thought: ...
    #     plan: ...
    #     action_name: ...
    #     action_input: ...
    #     observation: ...

    #     Let's begin!
    #     """

    systemmsg =
        """
        You are a helpful assistant acting as a polite, website-based sommelier for an online wine store.
        Your goal is: Help the user select the best wines from your inventory that align with the user's preferences.

        Your responsibility includes:
        1) Make an informed decision about what you need to do to achieve the goal.
        2) Thank the user when they are done choosing wines and invite them to come back next time.

        Your responsibility does not include:
        1) Processing sales orders or engaging in any other sales-related activities.

        At each round of conversation, you will be given the current situation:
        Recap: ...
        Context: ...

        You MUST follow the following guidelines:
        - Generally speaking, your inventory has some wines from France, the United States, Australia, Spain, and Italy, but you won't know which wines your store carries until you check your inventory.
        - All wines in your inventory are always in stock.
        - Use the "understand-then-check" inventory strategy to understand the user, as there are many wines in the inventory.
        - Do not ask the user about the wine's flavor e.g. floral, citrusy, nutty or something similar, as these terms cannot be used to search the database.
        - Once the user has selected their wine, ask the user if they need any further assistance. Do not offer any additional services. If the user doesn't need any further assistance, say goodbye and invite them to come back next time.

        You should follow the following guidelines as you see fit:
        - If the user interrupts, prioritize the user.
        - If you don't already know, find out the user's budget.
        - If you don't already know, find out the type of wine the user is looking for, such as red, white, sparkling, rose, dessert, fortified.
        - If you don't already know, find out the occasion for which the user is buying wine.
        - If you don't already know, find out the characteristics of wine the user is looking for, such as tannin, sweetness, intensity, acidity.
        - If you don't already know, find out what food will be served with the wine.
        - If you haven't already, introduce the wines you found in the database to the user first.

        You should then respond to the user with interleaving Thought, Plan, Action:
        1) thought:
           - State your reasoning about the current situation.
        2) plan: Based on the current situation, state a complete plan to complete the task. Be specific.
        3) action_name (Must be aligned with your plan): The name of the action, which can be one of the following functions:
           - CHATBOX, which you can use to generate conversation in order to communicate with the user. The input is your intentions for the dialogue. Be specific.
           - CHECKINVENTORY, which you can use to check info about wine in your inventory. The input is a search term in verbal English.
             Good query example: black car, a stereo, 200 mile range, electric motor.
           - PRESENTBOX, which you can use to introduce / suggest / recommend wines you just found in the database to the user. It is better than the CHATBOX function for presenting wines. The input is the names of the wines to introduce.
           - ENDCONVERSATION, which you can use when you want to finish the conversation with the user. The input is "NA".
        4) action_input: input of the action
        5) mentioning_wine: Are you mentioning a specific wine name to the user? Can be "Yes" or "No"

        You should only respond in the format described below:
        thought: Let's think step by step. In order to ...
        plan: ...
        action_name: ...
        action_input: ...
        mentioning_wine: ...

        Let's begin!
        """
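
    # For reference, a reply that parses cleanly with `GeneralUtils.textToDict` below follows the
    # exact key order mandated by the prompt. A hypothetical example (illustrative, not captured
    # from a real model run):
    #
    #   thought: Let's think step by step. In order to recommend a wine I still need the budget.
    #   plan: Ask the user for their budget, then search the inventory.
    #   action_name: CHATBOX
    #   action_input: Ask the user how much they would like to spend.
    #   mentioning_wine: No
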
    context =
        if length(a.memory[:shortmem]) > 0
            vectorOfDictToText(a.memory[:shortmem], withkey=false)
        else
            ""
        end

    # keep only the latest 3 conversation turns
    x = min(length(a.chathistory), 3) - 1
    chathistory = vectorOfDictToText(a.chathistory[end-x:end])

    errornote = ""
    response = nothing # placeholder so the response can be shown when an error message comes up

    for attempt in 1:10
        usermsg =
            """
            Recap: $(a.memory[:recap])
            Your recent conversation with the user: $chathistory
            Your Q&A: $(a.memory[:QandA])
            $errornote
            """

        _prompt =
            [
                Dict(:name=> "system", :text=> systemmsg),
                Dict(:name=> "user", :text=> usermsg)
            ]

        # put in model format
        prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
        prompt *=
            """
            <|start_header_id|>assistant<|end_header_id|>
            """

        try
            response = a.text2textInstructLLM(prompt)
            responsedict = GeneralUtils.textToDict(response,
                ["thought", "plan", "action_name", "action_input", "mentioning_wine"],
                rightmarker=":", symbolkey=true)
            # if occursin('[', responsedict[:action_name])
            #     action_input = GeneralUtils.getStringBetweenCharacters(responsedict[:action_name], '[', ']')
            #     action_name = string(split(responsedict[:action_name], '[')[1])
            # end

            if responsedict[:action_name] ∉ ["CHATBOX", "PRESENTBOX", "CHECKINVENTORY", "ENDCONVERSATION"]
                errornote = "You must use the given functions"
                error("You must use the given functions ", @__FILE__, " ", @__LINE__)
            end

            for i ∈ [:thought, :plan, :action_name]
                if length(responsedict[i]) == 0
                    error("$i is empty ", @__FILE__, " ", @__LINE__)
                end
            end

            # check that there is no more than one key per category
            for i ∈ [:thought, :plan, :action_name, :action_input, :mentioning_wine]
                matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
                if length(matchkeys) > 1
                    error("DecisionMaker has more than one key per category")
                end
            end

            println("")
            println("--> Yiem decisionMaker() ", @__FILE__, " ", @__LINE__)
            pprintln(responsedict)

            # check whether the LLM recommends wine before checking the inventory
            isMemEmpty = isempty(a.memory[:shortmem])
            if occursin("Yes", responsedict[:mentioning_wine]) && isMemEmpty &&
                responsedict[:action_name] != "CHECKINVENTORY"

                errornote = "Note: You can't recommend wines yet. You must check your inventory before recommending wine to the user."
                error("You can't recommend wines yet. You must check your inventory before recommending wines")
            elseif responsedict[:action_name] == "PRESENTBOX" && isMemEmpty
                errornote = "Note: You can't recommend wines yet. You must check your inventory before recommending wine to the user."
                error("You can't recommend wines yet. You must check your inventory before recommending wines")
            else
                errornote = ""
            end

            delete!(responsedict, :mentioning_wine)

            # if length(a.memory[:shortmem]) > 0 && responsedict[:action_name] != "PRESENTBOX"
            #     responsedict[:action_name] = "PRESENTBOX"
            # end

            return responsedict
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            println("Attempt $attempt. Error occurred: $errorMsg\n$st")
            println("")
        end
    end
    error("DecisionMaker failed to generate a thought ", response)
end

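# A minimal dispatch sketch (illustrative; mirrors what `think` below does with the returned
# dict, where `a` is an agent):
#
#     thoughtDict = decisionMaker(a)
#     if thoughtDict[:action_name] == "CHECKINVENTORY"
#         checkinventory(a, thoughtDict[:action_input])
#     end
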
""" Assigns a scalar value to each new child node to be used for selec-
|
|
tion and backpropagation. This value effectively quantifies the agent's progress in task completion,
|
|
serving as a heuristic to steer the search algorithm towards the most promising regions of the tree.
|
|
|
|
# Arguments
|
|
- `a::T1`
|
|
one of Yiem's agent
|
|
- `state::T2`
|
|
a game state
|
|
|
|
# Return
|
|
- `evaluation::Tuple{String, Integer}`
|
|
evaluation and score
|
|
|
|
# Example
|
|
```jldoctest
|
|
julia>
|
|
```
|
|
|
|
# Signature
|
|
"""
|
|
function evaluator(config::T1, state::T2
    )::Tuple{String, Integer} where {T1<:AbstractDict, T2<:AbstractDict}

    systemmsg =
        """
        Analyze the trajectories of a solution to a question answering task. The trajectories are
        labeled by environmental observations about the situation, thoughts that can reason about
        the current situation and actions that can be two types:
        1) CHECKINVENTORY[query], which you can use to find wine in your inventory.
        2) CHATBOX[text], which you can use to interact with the user.

        Given a question and a trajectory, evaluate its correctness and provide your reasoning and
        analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
        can be correct if the thoughts and actions so far are correct, even if the answer is not found
        yet. Do not generate additional thoughts or actions. Then end with the correctness score s,
        where s is an integer from 0 to 10.

        You should only respond in JSON format as described below:
        {"evaluation": "your evaluation", "score": "your evaluation score"}

        Here are some examples:
        user:
        {
            "question": "I'm looking for a sedan with an automatic driving feature.",
            "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
            "thought_2": "But there is only 1 model that has the feature the customer wanted.",
            "thought_3": "I should check our inventory first to see if we have it.",
            "action_1": {"name": "inventory", "input": "Yiem model A"},
            "observation_1": "Yiem model A is in stock."
        }
        assistant:
        {
            "evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
            It is also better to have simple searches corresponding to a single entity, making this the best action.",
            "score": 10
        }

        user:
        {
            "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
            "thought_1": "Let me check our inventory first to see if I have it.",
            "action_1": {"name": "inventory", "input": "pen with 4 colors and a pencil."},
            "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotring pencil"}",
            "thought_2": "Ok, I have what the user is asking for. Let's tell the user.",
            "action_2": {"name": "CHATBOX", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotring pencil"},
            "observation_2": "This is not what I wanted."
        }
        assistant:
        {
            "evaluation": "This trajectory is incorrect as my search term should be related to a 4-color pen with a pencil in it,
            not a pen and a pencil separately. A better search term should have been: a 4-color pen with a pencil, all-in-one.",
            "score": 0
        }

        Let's begin!
        """

    usermsg =
        """
        $(JSON3.write(state[:thoughtHistory]))
        """

    chathistory =
        [
            Dict(:name=> "system", :text=> systemmsg),
            Dict(:name=> "user", :text=> usermsg)
        ]

    # put in model format
    prompt = GeneralUtils.formatLLMtext(chathistory; formatname="llama3instruct")
    prompt *=
        """
        <|start_header_id|>assistant<|end_header_id|>
        {
        """

    pprint(prompt)

    # apply LLM specific instruct format
    externalService = config[:externalservice][:text2textinstruct]

    msgMeta = GeneralUtils.generate_msgMeta(
        externalService[:mqtttopic],
        senderName= "evaluator",
        senderId= string(uuid4()),
        receiverName= "text2textinstruct",
        mqttBroker= config[:mqttServerInfo][:broker],
        mqttBrokerPort= config[:mqttServerInfo][:port],
    )

    outgoingMsg = Dict(
        :msgMeta=> msgMeta,
        :payload=> Dict(
            :text=> prompt,
            :kwargs=> Dict(
                :max_tokens=> 512,
                :stop=> ["<|eot_id|>"],
            )
        )
    )

    for attempt in 1:5
        try
            response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
            _responseJsonStr = response[:response][:text]
            expectedJsonExample =
                """
                Here is an expected JSON format:
                {"evaluation": "...", "score": "..."}
                """
            responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
            evaluationDict = copy(JSON3.read(responseJsonStr))

            # check that the dict has all required values
            dummya::AbstractString = evaluationDict[:evaluation]
            dummyb::Integer = evaluationDict[:score]

            return (evaluationDict[:evaluation], evaluationDict[:score])
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
            println("")
        end
    end
    error("evaluator failed to generate an evaluation")
end

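# Usage sketch: `evaluator` is handed to the tree search (see the commented-out MCTS-based
# `conversation` below), which calls it on each new child state. A hypothetical direct call:
#
#     evaluation, score = evaluator(config, state)   # score is an Integer in 0:10
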
"""
|
|
|
|
# Arguments
|
|
|
|
# Return
|
|
|
|
# Example
|
|
```jldoctest
|
|
julia>
|
|
```
|
|
|
|
# TODO
|
|
- [] update docstring
|
|
- [x] implement the function
|
|
- [x] add try block. check result that it is expected before returning
|
|
|
|
# Signature
|
|
"""
|
|
function reflector(config::T1, state::T2)::String where {T1<:AbstractDict, T2<:AbstractDict}
    # https://github.com/andyz245/LanguageAgentTreeSearch/blob/main/hotpot/hotpot.py

    _prompt =
        """
        You are a helpful sommelier working for a wine store.
        Your goal is to recommend the best wine from your inventory that matches the user preferences.
        You will be given a question and a trajectory of the previous help you've done for a user.
        You were unsuccessful in helping the user either because you guessed the wrong answer with Finish[answer], or you didn't know the user well enough.
        In a few sentences, diagnose a possible reason for failure and devise a new, concise, high-level plan that aims to mitigate the same failure.
        Use complete sentences.

        You should only respond in JSON format as described below:
        {"reflection": "your reflection"}

        Here are some examples:
        Previous Trial:
        {
            "question": "Hello, I would like to get a bottle of wine",
            "thought_1": "A customer wants to buy a bottle of wine. Before making a recommendation, I need to know more about their preferences.",
            "action_1": {"name": "CHATBOX", "input": "What is the occasion for which you're buying this wine?"},
            "observation_1": "We are holding a wedding party",

            "thought_2": "A wedding party, that's a great occasion! The customer might be looking for a celebratory drink. Let me ask some more questions to narrow down the options.",
            "action_2": {"name": "CHATBOX", "input": "What type of food will you be serving at the wedding?"},
            "observation_2": "It will be Thai dishes.",

            "thought_3": "With Thai food, I should recommend a wine that complements its spicy and savory flavors. And since it's a celebratory occasion, the customer might prefer a full-bodied wine.",
            "action_3": {"name": "CHATBOX", "input": "What is your budget for this bottle of wine?"},
            "observation_3": "I would spend up to 50 bucks.",

            "thought_4": "Now that I have some more information, it's time to narrow down the options.",
            "action_4": {"name": "winestock", "input": "red wine with full body, pairs well with spicy food, budget \$50"},
            "observation_4": "I found the following wines in our stock: \n{\n 1: El Enemigo Cabernet Franc 2019\n2: Tantara Chardonnay 2017\n\n}\n",

            "thought_5": "Now that I have a list of potential wines, I need to know more about the customer's taste preferences.",
            "action_5": {"name": "CHATBOX", "input": "What type of wine characteristics are you looking for? (e.g. tannin level, sweetness, intensity, acidity)"},
            "observation_5": "I like full-bodied red wine with low tannin.",

            "thought_6": "Now that I have more information about the customer's preferences, it's time to make a recommendation.",
            "action_6": {"name": "recommendbox", "input": "El Enemigo Cabernet Franc 2019"},
            "observation_6": "I don't like the one you recommend. I want dry wine."
        }

        {
            "reflection": "I asked the user about the occasion, food type, and budget, and then searched for wine in the inventory right away. However, I should have asked the user for the specific wine type and their preferences in order to gather more information before making a recommendation."
        }

        Let's begin!

        Previous trial:
        $(JSON3.write(state[:thoughtHistory]))
        {"reflection"
        """

    # apply LLM specific instruct format
    externalService = config[:externalservice][:text2textinstruct]
    llminfo = externalService[:llminfo]
    prompt =
        if llminfo[:name] == "llama3instruct"
            formatLLMtext_llama3instruct("system", _prompt)
        else
            error("llm model name is not defined yet $(@__LINE__)")
        end

    msgMeta = GeneralUtils.generate_msgMeta(
        externalService[:mqtttopic],
        senderName= "reflector",
        senderId= string(uuid4()),
        receiverName= "text2textinstruct",
        mqttBroker= config[:mqttServerInfo][:broker],
        mqttBrokerPort= config[:mqttServerInfo][:port],
    )

    outgoingMsg = Dict(
        :msgMeta=> msgMeta,
        :payload=> Dict(
            :text=> prompt,
            :kwargs=> Dict(
                :max_tokens=> 512,
                :stop=> ["<|eot_id|>"],
            )
        )
    )

    for attempt in 1:5
        try
            response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
            _responseJsonStr = response[:response][:text]
            expectedJsonExample =
                """
                Here is an expected JSON format:
                {"reflection": "..."}
                """
            responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
            reflectionDict = copy(JSON3.read(responseJsonStr))

            # check that the dict has all required values
            dummya::AbstractString = reflectionDict[:reflection]

            return reflectionDict[:reflection]
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
            println("")
        end
    end
    error("reflector failed to generate a reflection")
end

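# Usage sketch (hypothetical; mirrors the commented-out lesson handling at the top of
# `decisionMaker`, where `trial` is an illustrative counter):
#
#     lesson = reflector(config, state)
#     lessonDict[Symbol("trial_", trial)] = Dict(:lesson=> lesson)  # then written to lesson.json
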
# """ Chat with llm.
|
|
|
|
# # Arguments
|
|
# `a::agent`
|
|
# an agent
|
|
|
|
# # Return
|
|
# None
|
|
|
|
# # Example
|
|
# ```jldoctest
|
|
# julia> using JSON3, UUIDs, Dates, FileIO, MQTTClient, ChatAgent
|
|
# julia> const mqttBroker = "mqtt.yiem.cc"
|
|
# julia> mqttclient, connection = MakeConnection(mqttBroker, 1883)
|
|
# julia> tools=Dict( # update input format
|
|
# "askbox"=>Dict(
|
|
# :description => "<askbox tool description>Useful for when you need to ask the user for more context. Do not ask the user their own question.</askbox tool description>",
|
|
# :input => "<input>Input is a text in JSON format.</input><input example>{\"Q1\": \"How are you doing?\", \"Q2\": \"How may I help you?\"}</input example>",
|
|
# :output => "" ,
|
|
# :func => nothing,
|
|
# ),
|
|
# )
|
|
# julia> msgMeta = Dict(
|
|
# :msgPurpose=> "updateStatus",
|
|
# :from=> "agent",
|
|
# :to=> "llmAI",
|
|
# :requestresponse=> "request",
|
|
# :sendto=> "", # destination topic
|
|
# :replyTo=> "agent/api/v0.1.0/txt/response", # requester ask responseer to send reply to this topic
|
|
# :repondToMsgId=> "", # responseer is responseing to this msg id
|
|
# :taskstatus=> "", # "complete", "fail", "waiting" or other status
|
|
# :timestamp=> Dates.now(),
|
|
# :msgId=> "$(uuid4())",
|
|
# )
|
|
# julia> a = ChatAgent.agentReflex(
|
|
# "Jene",
|
|
# mqttclient,
|
|
# msgMeta,
|
|
# agentConfigTopic, # I need a function to send msg to config topic to get load balancer
|
|
# role=:sommelier,
|
|
# tools=tools
|
|
# )
|
|
# julia> newAgent = ChatAgent.agentReact(agent)
|
|
# julia> response = ChatAgent.conversation(newAgent, "Hi! how are you?")
|
|
# ```
|
|
|
|
# # TODO
|
|
# - [] update docstring
|
|
# - [x] MCTS() for planning
|
|
# - [] add recap to initialState for earlier completed question
|
|
# - [WORKING] conversation loop
|
|
|
|
# # Signature
|
|
# """
|
|
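# NOTE: the commented-out block below is a superseded, MCTS-based version of `conversation`,
# kept for reference.
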
# function conversation(a::T, userinput::Dict) where {T<:agent}
#     config = deepcopy(a.config)
#     pprint(config)
#     if userinput[:text] == "newtopic"
#         clearhistory(a)
#         return "Okay. What shall we talk about?"
#     else
#         # add usermsg to a.chathistory
#         addNewMessage(a, "user", userinput[:text])

#         if isempty(a.plan[:currenttrajectory])

#             # initial state
#             a.plan[:currenttrajectory] = Dict{Symbol, Any}(
#                 # deepcopy the info to prevent modifying the info unintentionally during MCTS planning
#                 :customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
#                 :storeinfo=> deepcopy(a.keywordinfo[:storeinfo]),
#                 :userselect=> nothing,
#                 :reward=> 0,
#                 :isterminal=> false,
#                 :evaluation=> nothing,
#                 :lesson=> nothing,

#                 :totalTrajectoryReward=> nothing,

#                 # contains question, thought_1, action_1, observation_1, thought_2, ...
#                 :thoughtHistory=> OrderedDict{Symbol, Any}(
#                     #[] :recap=>,
#                     :question=> userinput[:text],
#                 ),

#                 # store the conversation for the virtual customer because the virtual customer
#                 # agent is just a function and stateless.
#                 :virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
#                     [Dict(:name=> "user", :text=> userinput[:text])]
#                 ),
#             )
#         else
#             _, a.plan[:currenttrajectory] = makeNewState(a.plan[:currenttrajectory],
#                 a.plan[:activeplan][:thoughtHistory], userinput[:text], userinput[:select],
#                 userinput[:reward], userinput[:isterminal])
#         end
#     end

#     while true
#         bestNextState, besttrajectory = LLMMCTS.runMCTS(a.plan[:currenttrajectory],
#             transition, config, decisionMaker, evaluator, reflector;
#             totalsample=2, maxDepth=3, maxiterations=3, explorationweight=1.0)
#         a.plan[:activeplan] = bestNextState

#         latestActionKey, latestActionIndice =
#             GeneralUtils.findHighestIndexKey(bestNextState[:thoughtHistory], "action")
#         actionname = bestNextState[:thoughtHistory][latestActionKey][:name]
#         actioninput = bestNextState[:thoughtHistory][latestActionKey][:input]

#         # transition
#         if actionname == "CHATBOX"
#             # add the assistant msg to a.chathistory
#             addNewMessage(a, "assistant", actioninput)
#             return actioninput
#         elseif actionname == "recommendbox"
#             # add the assistant msg to a.chathistory
#             addNewMessage(a, "assistant", actioninput)
#             return actioninput
#         else
#             _, a.plan[:currenttrajectory] = transition(a, a.plan[:currenttrajectory], a.plan[:activeplan])
#         end
#     end
# end

""" Chat with llm.
|
|
|
|
# Arguments
|
|
`a::agent`
|
|
an agent
|
|
|
|
# Return
|
|
None
|
|
|
|
# Example
|
|
```jldoctest
|
|
julia> using JSON3, UUIDs, Dates, FileIO, MQTTClient, ChatAgent
|
|
julia> const mqttBroker = "mqtt.yiem.cc"
|
|
julia> mqttclient, connection = MakeConnection(mqttBroker, 1883)
|
|
julia> tools=Dict( # update input format
|
|
"askbox"=>Dict(
|
|
:description => "<askbox tool description>Useful for when you need to ask the user for more context. Do not ask the user their own question.</askbox tool description>",
|
|
:input => "<input>Input is a text in JSON format.</input><input example>{\"Q1\": \"How are you doing?\", \"Q2\": \"How may I help you?\"}</input example>",
|
|
:output => "" ,
|
|
:func => nothing,
|
|
),
|
|
)
|
|
julia> msgMeta = Dict(
|
|
:msgPurpose=> "updateStatus",
|
|
:from=> "agent",
|
|
:to=> "llmAI",
|
|
:requestresponse=> "request",
|
|
:sendto=> "", # destination topic
|
|
:replyTo=> "agent/api/v0.1.0/txt/response", # requester ask responseer to send reply to this topic
|
|
:repondToMsgId=> "", # responseer is responseing to this msg id
|
|
:taskstatus=> "", # "complete", "fail", "waiting" or other status
|
|
:timestamp=> Dates.now(),
|
|
:msgId=> "$(uuid4())",
|
|
)
|
|
julia> a = ChatAgent.agentReflex(
|
|
"Jene",
|
|
mqttclient,
|
|
msgMeta,
|
|
agentConfigTopic, # I need a function to send msg to config topic to get load balancer
|
|
role=:sommelier,
|
|
tools=tools
|
|
)
|
|
julia> newAgent = ChatAgent.agentReact(agent)
|
|
julia> response = ChatAgent.conversation(newAgent, "Hi! how are you?")
|
|
```
|
|
|
|
# TODO
|
|
- [] update docstring
|
|
- [] add recap to initialState for earlier completed question
|
|
|
|
# Signature
|
|
"""
|
|
function conversation(a::T, userinput::Dict) where {T<:agent}

    # placeholders
    actionname = nothing
    result = nothing
    chatresponse = nothing

    if userinput[:text] == "newtopic"
        clearhistory(a)
        return "Okay. What shall we talk about?"
    else
        # add usermsg to a.chathistory
        addNewMessage(a, "user", userinput[:text])

        # add user activity to events memory
        push!(a.memory[:events],
            eventdict(;
                event_description= "the user talks to the assistant.",
                timestamp= Dates.now(),
                subject= "user",
                action_or_dialogue= userinput[:text],
            )
        )

        # think until the chosen action is user-facing (up to 3 thinking steps)
        for i in 1:3
            actionname, result = think(a)
            if actionname == "CHATBOX" || actionname == "PRESENTBOX" || actionname == "ENDCONVERSATION"
                break
            end
        end

        # the thought will be passed to the chat model via context
        chatresponse = generatechat(a.memory, a.chathistory, a.text2textInstructLLM)

        # sometimes the LLM tells the user that it is checking the inventory when it is not.
        # if chatresponse claims to check the inventory but think() didn't CHECKINVENTORY, do it now
        llmCheckInv = occursin("(check", chatresponse) || occursin("*check", chatresponse) ||
            occursin("inventory)", chatresponse) || occursin("inventory*", chatresponse)

        if llmCheckInv && actionname != "CHECKINVENTORY"
            actionname, result = forceInventoryCheck(a)
            push!(a.memory[:shortmem], Dict(Symbol(actionname)=> result))

            # regenerate chatresponse because we have forced an inventory check
            chatresponse = generatechat(a.memory, a.chathistory, a.text2textInstructLLM)
        else
            # since chatresponse does not hallucinate, i.e. no (check inventory), it does not
            # need to be regenerated and can be used directly
        end

        addNewMessage(a, "assistant", chatresponse)

        push!(a.memory[:events],
            eventdict(;
                event_description= "the assistant talks to the user.",
                timestamp= Dates.now(),
                subject= "assistant",
                action_or_dialogue= chatresponse,
            )
        )
        return chatresponse
    end
end

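# A minimal usage sketch (illustrative; `a` is a constructed agent as in the docstring above):
#
#     reply = conversation(a, Dict(:text=> "Hi, I need a wine for a dinner party."))
#     println(reply)
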
"""
|
|
|
|
# Arguments
|
|
|
|
# Return
|
|
|
|
# Example
|
|
```jldoctest
|
|
julia>
|
|
```
|
|
|
|
# TODO
|
|
- [] update docstring
|
|
|
|
# Signature
|
|
"""
|
|
function think(a::T)::NamedTuple{(:actionname, :result), Tuple{String, String}} where {T<:agent}

    a.memory[:recap] = generateSituationReport(a, a.text2textInstructLLM)
    a.memory[:QandA] = generatequestion(a, a.text2textInstructLLM)
    # after the user has selected their wine, no question should be asked
    # a.memory[:QandA] =
    #     if occursin("None", a.memory[:sitrep][:wine_selected])
    #         generatequestion(a, a.text2textInstructLLM)
    #     else
    #         generatequestion(a, a.text2textInstructLLM) #[PENDING] to be removed. this is just for exploration
    #         "None"
    #     end

    thoughtDict = decisionMaker(a)

    actionname = thoughtDict[:action_name]
    actioninput = thoughtDict[:action_input]

    # map the action and input to an llm function
    response =
        if actionname == "CHATBOX"
            (result=actioninput, errormsg=nothing, success=true)
        elseif actionname == "CHECKINVENTORY"
            checkinventory(a, actioninput)
        elseif actionname == "PRESENTBOX"
            x = """
                1) Introduce $actioninput in detail for the user to choose from.
                2) Compare each option against the others in detail and explain why each one is a suitable match for the user's specific needs.
                """
            (result=x, errormsg=nothing, success=true)
        elseif actionname == "ENDCONVERSATION"
            x = "Conclude the conversation: thank the user, say goodbye and invite them to return next time."
            (result=x, errormsg=nothing, success=true)
        else
            error("undefined LLM function. Requesting $actionname")
        end

    # this section allows the LLM functions above to have different return values.
    result = haskey(response, :result) ? response[:result] : nothing
    select = haskey(response, :select) ? response[:select] : nothing
    reward::Integer = haskey(response, :reward) ? response[:reward] : 0
    isterminal::Bool = haskey(response, :isterminal) ? response[:isterminal] : false
    errormsg::Union{AbstractString, Nothing} = haskey(response, :errormsg) ? response[:errormsg] : nothing
    success::Bool = haskey(response, :success) ? response[:success] : false

    # manage memory (pass the msg to generatechat)
    if actionname == "CHATBOX"
        a.memory[:CHATBOX] = result
    elseif actionname == "CHECKINVENTORY"
        push!(a.memory[:shortmem], Dict(Symbol(actionname)=> result))
    elseif actionname == "PRESENTBOX" # tell generatechat()
        a.memory[:CHATBOX] = result
    elseif actionname == "ENDCONVERSATION"
        a.memory[:CHATBOX] = result
    else
        error("condition is not defined")
    end

    return (actionname=actionname, result=result)
end

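# `think` returns a NamedTuple, so callers can destructure it directly, as `conversation` above
# does:
#
#     actionname, result = think(a)
#     actionname == "CHATBOX" && @info "agent wants to say: $result"   # illustrative
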
""" Force to think and check inventory
|
|
|
|
"""
|
|
function forceInventoryCheck(a::T)::NamedTuple{(:actionname, :result), Tuple{String, String}} where {T<:agent}
    println("--> forceInventoryCheck()")
    thoughtDict = thinkCheckInventory(a)
    actionname = thoughtDict[:action_name]
    actioninput = thoughtDict[:action_input]

    # map the action and input to an llm function
    response =
        if actionname == "CHECKINVENTORY"
            checkinventory(a, actioninput)
        else
            error("undefined LLM function. Requesting $actionname")
        end

    # this section allows the LLM functions above to have different return values.
    result = haskey(response, :result) ? response[:result] : nothing
    select = haskey(response, :select) ? response[:select] : nothing
    reward::Integer = haskey(response, :reward) ? response[:reward] : 0
    isterminal::Bool = haskey(response, :isterminal) ? response[:isterminal] : false
    errormsg::Union{AbstractString, Nothing} = haskey(response, :errormsg) ? response[:errormsg] : nothing
    success::Bool = haskey(response, :success) ? response[:success] : false

    return (actionname=actionname, result=result)
end

function thinkCheckInventory(a::T)::Dict{Symbol, Any} where {T<:agent}

    systemmsg =
        """
        You are a helpful sommelier working for a wine store.
        Your task is to help the user choose the best wine that matches the user preferences from your inventory.

        Definitions:
        - observation: result of the preceding immediate action.

        At each round of conversation, the user will give you the current situation:
        Context: ...
        Your earlier conversation with the user: ...

        You must follow the following guidelines:
        - Check the inventory immediately based on what you know about the user.

        You should then respond to the user with interleaving Thought, Plan, Action and Observation:
        - thought:
          1) State your reasoning about the current situation.
        - plan: Based on the current situation, state a complete plan to complete the task. Be specific.
        - action_name (Must be aligned with your plan): Can be one of the following functions:
          1) CHECKINVENTORY[query], which you can use to check info about wine in your inventory. "query" is a search term in verbal English.
             Good query example: black car with a stereo, 200 mile range and an electric motor.
             Good query example: How many car brands are from Asia?
        - action_input: input to the action

        You should only respond in the format described below:
        thought: ...
        plan: ...
        action_name: ...
        action_input: ...

        Let's begin!
        """

    usermsg =
        """
        Context: None
        Your earlier conversation with the user: $(vectorOfDictToText(a.chathistory))
        """

    _prompt =
        [
            Dict(:name=> "system", :text=> systemmsg),
            Dict(:name=> "user", :text=> usermsg)
        ]

    # put in model format
    prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
    prompt *=
        """
        <|start_header_id|>assistant<|end_header_id|>
        """
    response = nothing # placeholder so the response can be shown when an error message comes up
    for attempt in 1:10
        try
            response = a.text2textInstructLLM(prompt)
            responsedict = GeneralUtils.textToDict(response,
                ["thought", "plan", "action_name", "action_input"],
                rightmarker=":", symbolkey=true)

            if responsedict[:action_name] ∉ ["CHECKINVENTORY"]
                error("decisionMaker didn't use the given functions ", @__LINE__)
            end

            for i ∈ [:thought, :plan, :action_name]
                if length(JSON3.write(responsedict[i])) == 0
                    error("$i is empty ", @__LINE__)
                end
            end

            # check that there is no more than one key per category
            for i ∈ [:thought, :plan, :action_name, :action_input]
                matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
                if length(matchkeys) > 1
                    error("DecisionMaker has more than one key per category")
                end
            end

            return responsedict
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            println("Attempt $attempt. Error occurred: $errorMsg\n$st")
            println("")
        end
    end
    error("DecisionMaker failed to generate a thought ", response)
end

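# For reference, a well-formed reply here parses into something like (hypothetical values):
#
#     Dict(:thought=> "...", :plan=> "...",
#          :action_name=> "CHECKINVENTORY",
#          :action_input=> "full-bodied red wine under 50 dollars that pairs with Thai food")
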
"""
|
|
|
|
# Arguments
|
|
- `a::T1`
|
|
one of ChatAgent's agent.
|
|
- `input::T2`
|
|
# Return
|
|
A JSON string of available wine
|
|
|
|
# Example
|
|
```jldoctest
|
|
julia>
|
|
```
|
|
|
|
# TODO
|
|
- [] update docs
|
|
- []
|
|
|
|
# Signature
|
|
"""
|
|
function generatechat(memory::Dict, chathistory::Vector, text2textInstructLLM::Function)
    systemmsg =
        """
        You are a helpful assistant acting as a polite, website-based sommelier for an online wine store.
        Your goal is: Recommend the best wines from your inventory that align with the user's preferences.

        Your responsibility includes:
        1) Given the situation, convey your thoughts to the user.

        Your responsibility does not include:
        1) Processing sales orders or engaging in any other sales-related activities.

        At each round of conversation, you will be given the current situation:
        Your conversation with the user: ...
        Your thoughts: Your current thoughts in your mind.
        Context: ...

        You MUST follow the following guidelines:
        - Do not offer additional services you did not think of.

        You should follow the following guidelines:
        - Focus on the latest conversation.

        You should then respond to the user with:
        1) chat: Given the situation, what would you say to convey your thoughts to the user?
        2) mentioning_wine: Are you mentioning a specific wine name to the user? Can be "Yes" or "No"
        3) note: Put everything you want to add here

        You should only respond in the format described below:
        chat: ...
        mentioning_wine: ...
        note: ...

        Let's begin!
        """

    context =
        if length(memory[:shortmem]) > 0 #[WORKING] add with number order 1), 2)
            vectorOfDictToText(memory[:shortmem], withkey=false)
        else
            ""
        end

    chathistory = vectorOfDictToText(chathistory)
    errornote = ""
    response = nothing # placeholder so the response can be shown when an error message comes up

    for attempt in 1:10
        usermsg =
            """
            Your conversation with the user: $chathistory
            $context
            Your thoughts: $(memory[:CHATBOX])
            $errornote
            """

        _prompt =
            [
                Dict(:name=> "system", :text=> systemmsg),
                Dict(:name=> "user", :text=> usermsg)
            ]

        # put in model format
        prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
        prompt *=
            """
            <|start_header_id|>assistant<|end_header_id|>
            """

        try
            response = text2textInstructLLM(prompt)
            responsedict = GeneralUtils.textToDict(response, ["chat", "mentioning_wine", "note"],
                rightmarker=":", symbolkey=true)

            for i ∈ [:chat]
                if length(JSON3.write(responsedict[i])) == 0
                    error("$i is empty ", @__LINE__)
                end
            end

            # check that there is no more than one key per category
            for i ∈ [:chat]
                matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
                if length(matchkeys) > 1
                    error("generatechat has more than one key per category")
                end
            end

            # check that "Context:" did not leak into the chat
            if occursin("Context:", responsedict[:chat])
                error("Context: is in text. This is not allowed")
            end

            println("")
            println("--> generatechat() ", @__FILE__, " ", @__LINE__)
            pprintln(responsedict)

            # check whether the LLM recommends wine before checking the inventory
            isMemEmpty = isempty(memory[:shortmem])
            if occursin("Yes", responsedict[:mentioning_wine]) && isMemEmpty
                errornote = "Note: You can't recommend wines yet. You must check your inventory before recommending wine to the user."
                error("You must check your inventory before recommending wine")
            else
                errornote = ""
            end

            memory[:CHATBOX] = "" # clear the content because it is no longer used.
            delete!(responsedict, :mentioning_wine)
            result = responsedict[:chat]

            return result
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            println("Attempt $attempt. Error occurred: $errorMsg\n$st")
            println("")
        end
    end
    error("generatechat failed to generate a chat response")
end

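# How the pieces fit together (sketch): `conversation` calls `generatechat` after `think`; if the
# chat text claims an inventory check that never happened, `forceInventoryCheck` runs and the chat
# is regenerated with the fresh inventory results in `memory[:shortmem]`.
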
function generatequestion(a, text2textInstructLLM::Function)::String

    systemmsg =
        """
        You are a helpful assistant acting as a polite, website-based sommelier for an online wine store.
        Your goals include:
        1) Help the user select the best wines from your inventory that align with the user's preferences.

        Your responsibilities include:
        1) Asking yourself what to do about the current situation
        2) Thanking the user when they are done choosing wines and inviting them to come back next time

        Your responsibilities do not include:
        1) Processing sales orders or engaging in any other sales-related activities.

        At each round of conversation, you will be given the current situation:
        Your conversation with the user: ...
        Context: ...

        You must follow these guidelines:
        - Your question should be specific, self-contained, and not require any additional context.
        - Do not generate any questions or comments at the end.
        - Once the user has selected their wine, ask the user if they need any further assistance. Do not offer any additional services. If the user doesn't need any further assistance, say goodbye and invite them to come back next time.

        You should follow these guidelines:
        - Focus on the latest conversation.
        - Do not ask the user about a wine's flavor, e.g. floral, citrusy, nutty, or something similar, as these terms cannot be used to search the database.
        - All wines in your inventory are always in stock.

        You should then respond to the user with:
        1) thought: State your reasoning about the current situation
        2) Q: Given the situation, "ask yourself" at least two, but no more than five, questions about the situation.
        3) A: Given the situation, "answer yourself" as best you can
        4) note: Put everything you want to add here

        Here are some examples:
        Q: The user is asking for a cappuccino. Do I have it at my cafe?
        A: No, I don't.
        Q: Since I don't have a cappuccino but I have a latte, should I ask if they are okay with that?
        A: Yes, I should.
        Q: Are they allergic to milk?
        A: Since they mentioned a cappuccino before, I think they are not allergic to milk.
        Q: Did I search the database yet?
        A: I've searched the database and found ...
        Q: Did I introduce the wines to the user yet?
        A: Not yet. I will introduce the wines now.

        You must respond only in the format described below:
        thought: ..., In order to ..., I should ask myself the following questions.
        Q_1: ...
        A_1: ...
        Q_2: ...
        A_2: ...
        Q_3: ...
        A_3: ...
        ...
        note: ...

        Let's begin!
        """

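    # build the Context block from short-term memory; empty when nothing has been stored yet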
    context =
        if length(a.memory[:shortmem]) > 0
            vectorOfDictToText(a.memory[:shortmem], withkey=false)
        else
            ""
        end
    chathistory = vectorOfDictToText(a.chathistory)
    errornote = ""
    response = nothing # kept so the last response can be shown if an error occurs

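    # retry loop: query the LLM up to 10 times; a malformed response raises inside the
    # try block, is logged, and the next attempt repeats the request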
    for attempt in 1:10
        usermsg =
            """
            Your conversation with the user: $chathistory
            Recap: $(a.memory[:recap])
            $context
            $errornote
            """

        _prompt =
            [
                Dict(:name=> "system", :text=> systemmsg),
                Dict(:name=> "user", :text=> usermsg)
            ]

        # put in model format
        prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
        prompt *=
            """
            <|start_header_id|>assistant<|end_header_id|>
            """

        try
            response = text2textInstructLLM(prompt)
            q_number = count("Q_", response)
            if q_number < 1
                error("too few questions: only $q_number questions were generated ", @__FILE__, " ", @__LINE__)
            end
            # response = string(split(response, "Please")[1]) # the LLM often appends comments that are not needed
            responsedict = GeneralUtils.textToDict(response,
                ["thought", "Q_1", "note"],
                rightmarker=":", symbolkey=true)
            response = "Q_1: " * responsedict[:Q_1]
            println("--> generatequestion ", @__FILE__, " ", @__LINE__)
            pprintln(response)
            return response
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            println("Attempt $attempt. Error occurred: $errorMsg\n$st")
            println("")
        end
    end
    error("generatequestion failed to generate a question ", response)
end

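""" Summarize every event in the agent's memory into a recap

# Arguments
- `a`
    one of Yiem's agents; reads `a.memory[:events]`, a vector of Dicts with
    `:subject`, `:action_or_dialogue`, and `:outcome` keys
- `text2textInstructLLM::Function`
    callable that takes a formatted prompt and returns the LLM's text response

# Return
- `report::Dict`
    a Dict with a single key `:recap` holding the per-event summaries ("event_1: ...", "event_2: ...")

# Example
The output below is illustrative only; the actual text depends on the LLM and the event history.
```jldoctest
julia> generateSituationReport(a, text2textInstructLLM)
Dict{Symbol, Any}(:recap => "event_1: The user asked for a dry red wine.\nevent_2: I searched the inventory and found two candidates.")
```

# Signature
"""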
function generateSituationReport(a, text2textInstructLLM::Function)::Dict

    # systemmsg =
    #     """
    #     You are a helpful assistant in the given timeline.
    #     Your task is to write a situational report on the current situation.

    #     At each round of conversation, you will be given the current situation:
    #     Timeline: ...
    #     Context: ...

    #     You should then respond to the user with:
    #     1) report: State your detailed situational report on the current situation.
    #     2) wine_selected: Indicates whether the user selected wine. It can be the name of the selected wine or "None"

    #     You must respond only in the format described below:
    #     report: ...
    #     wine_selected: ...

    #     Let's begin!
    #     """

    systemmsg =
        """
        You are an assistant present at the given events.
        Your task is to write a summary for each event in an ongoing series.

        At each round of conversation, you will be given the situation:
        Total events: number of events you need to summarize.
        Events timeline: ...
        Context: ...

        You should then respond to the user with:
        event: a detailed summary for each event without exaggerated details.

        You must respond only in the format described below:
        event_1: ...
        event_2: ...
        ...

        Here are some examples:
        event_1: The user asked me where to buy a toy.
        event_2: I told the user to go to the store on the 2nd floor.

        Let's begin!
        """

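    # build a numbered timeline from event memory; an event's outcome is appended only when present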
    timeline = ""
    for (i, event) in enumerate(a.memory[:events])
        if event[:outcome] === nothing
            timeline *= "$i) $(event[:subject])> $(event[:action_or_dialogue])\n"
        else
            timeline *= "$i) $(event[:subject])> $(event[:action_or_dialogue]) $(event[:outcome])\n"
        end
    end

    errornote = ""
    response = nothing # kept so the last response can be shown if an error occurs

    for attempt in 1:10
        usermsg =
            """
            Total events: $(length(a.memory[:events]))
            Events timeline: $timeline
            $errornote
            """

        _prompt =
            [
                Dict(:name=> "system", :text=> systemmsg),
                Dict(:name=> "user", :text=> usermsg)
            ]

        # put in model format
        prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
        prompt *=
            """
            <|start_header_id|>assistant<|end_header_id|>
            """

        response = text2textInstructLLM(prompt)
        # responsedict = GeneralUtils.textToDict(response,
        #     ["summary", "presented", "selected"],
        #     rightmarker=":", symbolkey=true)
        # println("--> generateSituationReport ", @__FILE__, " ", @__LINE__)
        pprintln(response)

        eventcount = count("event_", response)

        if eventcount < length(a.memory[:events])
            errornote = "Note: You need to summarize every event."
            println("the summary covers $eventcount/$(length(a.memory[:events])) events ", @__FILE__, " ", @__LINE__)
        else
            return Dict(:recap=> response)
        end
    end
    error("generateSituationReport failed to generate a report ", response)
end

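# A minimal sketch of how the two steps above could be chained (illustrative; assumes `a` is
# an agent whose memory carries :events and :recap, and `text2textInstructLLM` is the same
# callable used above):
#
#     report = generateSituationReport(a, text2textInstructLLM)
#     a.memory[:recap] = report[:recap]          # feed the recap into the next prompt
#     question = generatequestion(a, text2textInstructLLM)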
# function generateSituationReport(a, text2textInstructLLM::Function)::Dict

#     # systemmsg =
#     #     """
#     #     You are a helpful assistant in the given timeline.
#     #     Your task is to write a situational report on the current situation.

#     #     At each round of conversation, you will be given the current situation:
#     #     Timeline: ...
#     #     Context: ...

#     #     You should then respond to the user with:
#     #     1) report: State your detailed situational report on the current situation.
#     #     2) wine_selected: Indicates whether the user selected wine. It can be the name of the selected wine or "None"

#     #     You must respond only in the format described below:
#     #     report: ...
#     #     wine_selected: ...

#     #     Let's begin!
#     #     """

#     systemmsg =
#         """
#         You are an assistant present at the given events.
#         Your task is to write a summary of each event.

#         At each round of conversation, you will be given the situation:
#         Events timeline: ...
#         Context: ...

#         You should then respond to the user with:
#         event: a detailed summary of each event without exaggerated details.

#         You must respond only in the format described below:
#         event 1: ...
#         event 2: ...
#         ...

#         Here are some examples:
#         event 1: The user asked me where to buy a toy.
#         event 2: I told the user to go to the store on the 2nd floor.

#         Let's begin!
#         """

#     timeline = ""
#     for (i, event) in enumerate(a.memory[:events])
#         if event[:outcome] === nothing
#             timeline *= "$i) $(event[:subject])> $(event[:action_or_dialogue])\n"
#         else
#             timeline *= "$i) $(event[:subject])> $(event[:action_or_dialogue]) $(event[:outcome])\n"
#         end
#     end

#     errornote = ""
#     response = nothing # kept so the last response can be shown if an error occurs

#     for attempt in 1:10
#         usermsg =
#             """
#             Events timeline: $timeline
#             $errornote
#             """

#         _prompt =
#             [
#                 Dict(:name=> "system", :text=> systemmsg),
#                 Dict(:name=> "user", :text=> usermsg)
#             ]

#         # put in model format
#         prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
#         prompt *=
#             """
#             <|start_header_id|>assistant<|end_header_id|>
#             """

#         try
#             response = text2textInstructLLM(prompt)
#             # responsedict = GeneralUtils.textToDict(response,
#             #     ["summary", "presented", "selected"],
#             #     rightmarker=":", symbolkey=true)
#             # println("--> generateSituationReport ", @__FILE__, " ", @__LINE__)
#             pprintln(response)

#             eventcount = count("event", response)

#             if eventcount < length(a.memory[:events])
#                 error("the summary is missing some events ", @__FILE__, " ", @__LINE__)
#             end

#             return Dict(:recap=>response)
#         catch e
#             io = IOBuffer()
#             showerror(io, e)
#             errorMsg = String(take!(io))
#             st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
#             println("")
#             println("Attempt $attempt. Error occurred: $errorMsg\n$st")
#             println("")
#         end
#     end
#     error("generateSituationReport failed to generate a report ", response)
# end

# """
|
|
|
|
# # Arguments
|
|
# - `a::T1`
|
|
# one of Yiem's agent
|
|
# - `state::T2`
|
|
# a game state
|
|
|
|
# # Return
|
|
# - `evaluation::Tuple{String, Integer}`
|
|
# evaluation and score
|
|
|
|
# # Example
|
|
# ```jldoctest
|
|
# julia>
|
|
# ```
|
|
|
|
# # TODO
|
|
# - [] update docs
|
|
# - [] implement the function
|
|
|
|
# # Signature
|
|
# """
|
|
# function comparer(a::T1, state::T2)::Tuple{String, Integer} where {T1<:agent, T2<:AbstractDict}

#     _prompt =
#         """
#         Analyze the trajectories of a solution to a question answering task. The trajectories are
#         labeled by environmental observations about the situation, thoughts that can reason about
#         the current situation, and actions that can be of three types:
#         1) winestock[query], which you can use to find wine in your inventory.
#         2) CHATBOX[text], which you can use to interact with the user.
#         3) recommendbox[answer], which returns your wine recommendation to the user.

#         Given a question and a trajectory, evaluate its correctness and provide your reasoning and
#         analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
#         can be correct if the thoughts and actions so far are correct, even if the answer is not found
#         yet. Do not generate additional thoughts or actions. Then end with the correctness score s,
#         where s is an integer from 0 to 10.

#         You should only respond in JSON format as described below:
#         {"evaluation": "your evaluation", "score": "your evaluation score"}

#         Here are some examples:
#         {
#             "question": "I'm looking for a sedan with an automatic driving feature.",
#             "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
#             "thought_2": "But there is only 1 model that has the feature the customer wanted.",
#             "thought_3": "I should check our inventory first to see if we have it.",
#             "action_1": {"name": "inventory", "input": "Yiem model A"},
#             "observation_1": "Yiem model A is in stock."
#         }
#         {"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
#         It is also better to have simple searches corresponding to a single entity, making this the best action.",
#         "score": 10
#         }

#         {
#             "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
#             "thought_1": "Let me check our inventory first to see if I have it.",
#             "action_1": {"name": "inventory", "input": "pen with 4 colors and a pencil."},
#             "observation_1": "I found {1: "Pilot Dr. Grip 4-in-1 pen", 2: "Rotring pencil"}",
#             "thought_2": "Ok, I have what the user is asking for. Let's tell the user.",
#             "action_2": {"name": "CHATBOX", "input": "Yes, we do have a Pilot Dr. Grip 4-in-1 pen and a Rotring pencil"},
#             "observation_2": "This is not what I wanted."
#         }
#         {"evaluation": "This trajectory is incorrect as my search term should be related to a 4-color pen with a pencil in it,
#         not a pen and a pencil separately. A better search term would have been: a 4-color pen with a pencil, all-in-one.",
#         "score": 0
#         }

#         Let's begin!
#         $(JSON3.write(state[:thoughtHistory]))
#         {"evaluation"
#         """

#     # apply the LLM-specific instruct format
#     externalService = a.config[:externalservice][:text2textinstruct]
#     llminfo = externalService[:llminfo]
#     prompt =
#         if llminfo[:name] == "llama3instruct"
#             formatLLMtext_llama3instruct("system", _prompt)
#         else
#             error("llm model name is not defined yet $(@__LINE__)")
#         end

#     msgMeta = GeneralUtils.generate_msgMeta(
#         a.config[:externalservice][:text2textinstruct][:mqtttopic],
#         senderName= "evaluator",
#         senderId= a.id,
#         receiverName= "text2textinstruct",
#         mqttBroker= a.config[:mqttServerInfo][:broker],
#         mqttBrokerPort= a.config[:mqttServerInfo][:port],
#     )

#     outgoingMsg = Dict(
#         :msgMeta=> msgMeta,
#         :payload=> Dict(
#             :text=> prompt,
#             :kwargs=> Dict(
#                 :max_tokens=> 512,
#                 :stop=> ["<|eot_id|>"],
#             )
#         )
#     )

#     for attempt in 1:5
#         try
#             response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
#             _responseJsonStr = response[:response][:text]
#             expectedJsonExample =
#                 """
#                 Here is an expected JSON format:
#                 {"evaluation": "...", "score": "..."}
#                 """
#             responseJsonStr = jsoncorrection(a, _responseJsonStr, expectedJsonExample)
#             evaluationDict = copy(JSON3.read(responseJsonStr))

#             # check that the dict has all required values
#             dummya::AbstractString = evaluationDict[:evaluation]
#             dummyb::Integer = evaluationDict[:score]

#             return (evaluationDict[:evaluation], evaluationDict[:score])
#         catch e
#             io = IOBuffer()
#             showerror(io, e)
#             errorMsg = String(take!(io))
#             st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
#             println("")
#             @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
#             println("")
#         end
#     end
#     error("evaluator failed to generate an evaluation")
# end
end # module interface |