This commit is contained in:
narawat lamaiin
2024-06-05 09:26:48 +07:00
parent bfc6c234ae
commit f0cceee043
3 changed files with 279 additions and 265 deletions

View File

@@ -46,11 +46,12 @@ macro executeStringFunction(functionStr, args...)
function_to_call(args...)
end
""" Think and choose action
# Arguments
- `a::T1`
one of Yiem's agents
- `config::T1`
config
- `state::T2`
a game state
@@ -59,6 +60,23 @@ end
# Example
```jldoctest
julia> config = Dict(
:mqttServerInfo => Dict(
:description => "mqtt server info",
:port => 1883,
:broker => "mqtt.yiem.cc"
),
:externalservice => Dict(
:text2textinstruct => Dict(
:mqtttopic => "/loadbalancer/requestingservice",
:description => "text to text service with instruct LLM",
:llminfo => Dict(
:name => "llama3instruct"
)
),
)
)
julia> output_thoughtDict = Dict(
:thought_1 => "The customer wants to buy a bottle of wine. This is a good start!",
:action_1 => Dict{Symbol, Any}(
@@ -70,7 +88,7 @@ julia> output_thoughtDict = Dict(
```
# TODO
- [x] update docstring
- [ ] update docstring
- [x] implement the function
- [ ] implement RAG to pull similar experience
- [ ] use customerinfo
@@ -398,260 +416,6 @@ function evaluator(config::T1, state::T2
end
error("evaluator failed to generate an evaluation")
end
# function evaluator(a::T1, state::T2)::Tuple{String, Integer} where {T1<:agent, T2<:AbstractDict}
# _prompt =
# """
# Analyze the trajectories of a solution to a question answering task. The trajectories are
# labeled by environmental observations about the situation, thoughts that can reason about
# the current situation and actions that can be three types:
# 1) winestock[query], which you can use to find wine in your inventory.
# 2) chatbox[text], which you can use to interact with the user.
# 3) recommendbox[answer], which returns your wine recommendation to the user.
# Given a question and a trajectory, evaluate its correctness and provide your reasoning and
# analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
# can be correct if the thoughts and actions so far are correct, even if the answer is not found
# yet. Do not generate additional thoughts or actions. Then ending with the correctness score s
# where s is an integer from 0 to 10.
# You should only respond in JSON format as described below:
# {"evaluation": "your evaluation", "score": "your evaluation score"}
# Here are some examples:
# {
# "question": "I'm looking for a sedan with an automatic driving feature.",
# "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
# "thought_2": "But there is only 1 model that has the feature customer wanted.",
# "thought_3": "I should check our inventory first to see if we have it.",
# "action_1": {"name": "inventory", "input": "Yiem model A"},
# "observation_1": "Yiem model A is in stock."
# }
# {"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
# It is also better to have simple searches corresponding to a single entity, making this the best action.",
# "score": 10
# }
# {
# "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
# "thought_1": "Let me check our inventory first to see if I have it.",
# "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
# "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
# "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
# "action_2": {"name": "chatbox", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
# "observation_1": "This is not what I wanted."
# }
# {"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
# not a pen and a pencil separately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
# "score": 0
# }
# Let's begin!:
# $(JSON3.write(state[:thoughtHistory]))
# {"evaluation"
# """
# # apply LLM specific instruct format
# externalService = a.config[:externalservice][:text2textinstruct]
# llminfo = externalService[:llminfo]
# prompt =
# if llminfo[:name] == "llama3instruct"
# formatLLMtext_llama3instruct("system", _prompt)
# else
# error("llm model name is not defined yet $(@__LINE__)")
# end
# msgMeta = GeneralUtils.generate_msgMeta(
# a.config[:externalservice][:text2textinstruct][:mqtttopic],
# senderName= "evaluator",
# senderId= a.id,
# receiverName= "text2textinstruct",
# mqttBroker= a.config[:mqttServerInfo][:broker],
# mqttBrokerPort= a.config[:mqttServerInfo][:port],
# )
# outgoingMsg = Dict(
# :msgMeta=> msgMeta,
# :payload=> Dict(
# :text=> prompt,
# :kwargs=> Dict(
# :max_tokens=> 512,
# :stop=> ["<|eot_id|>"],
# )
# )
# )
# for attempt in 1:5
# try
# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
# _responseJsonStr = response[:response][:text]
# expectedJsonExample =
# """
# Here is an expected JSON format:
# {"evaluation": "...", "score": "..."}
# """
# responseJsonStr = jsoncorrection(a, _responseJsonStr, expectedJsonExample)
# evaluationDict = copy(JSON3.read(responseJsonStr))
# # check if dict has all required value
# dummya::AbstractString = evaluationDict[:evaluation]
# dummyb::Integer = evaluationDict[:score]
# return (evaluationDict[:evaluation], evaluationDict[:score])
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("")
# @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
# println("")
# end
# end
# error("evaluator failed to generate an evaluation")
# end
# """
# # Arguments
# - `a::T1`
# one of Yiem's agents
# - `state::T2`
# a game state
# # Return
# - `evaluation::Tuple{String, Integer}`
# evaluation and score
# # Example
# ```jldoctest
# julia>
# ```
# # TODO
# - [ ] update docs
# - [ ] implement the function
# # Signature
# """
# function comparer(a::T1, state::T2)::Tuple{String, Integer} where {T1<:agent, T2<:AbstractDict}
# _prompt =
# """
# Analyze the trajectories of a solution to a question answering task. The trajectories are
# labeled by environmental observations about the situation, thoughts that can reason about
# the current situation and actions that can be three types:
# 1) winestock[query], which you can use to find wine in your inventory.
# 2) chatbox[text], which you can use to interact with the user.
# 3) recommendbox[answer], which returns your wine recommendation to the user.
# Given a question and a trajectory, evaluate its correctness and provide your reasoning and
# analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
# can be correct if the thoughts and actions so far are correct, even if the answer is not found
# yet. Do not generate additional thoughts or actions. Then ending with the correctness score s
# where s is an integer from 0 to 10.
# You should only respond in JSON format as described below:
# {"evaluation": "your evaluation", "score": "your evaluation score"}
# Here are some examples:
# {
# "question": "I'm looking for a sedan with an automatic driving feature.",
# "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
# "thought_2": "But there is only 1 model that has the feature customer wanted.",
# "thought_3": "I should check our inventory first to see if we have it.",
# "action_1": {"name": "inventory", "input": "Yiem model A"},
# "observation_1": "Yiem model A is in stock."
# }
# {"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
# It is also better to have simple searches corresponding to a single entity, making this the best action.",
# "score": 10
# }
# {
# "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
# "thought_1": "Let me check our inventory first to see if I have it.",
# "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
# "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
# "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
# "action_2": {"name": "chatbox", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
# "observation_1": "This is not what I wanted."
# }
# {"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
# not a pen and a pencil separately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
# "score": 0
# }
# Let's begin!:
# $(JSON3.write(state[:thoughtHistory]))
# {"evaluation"
# """
# # apply LLM specific instruct format
# externalService = a.config[:externalservice][:text2textinstruct]
# llminfo = externalService[:llminfo]
# prompt =
# if llminfo[:name] == "llama3instruct"
# formatLLMtext_llama3instruct("system", _prompt)
# else
# error("llm model name is not defined yet $(@__LINE__)")
# end
# msgMeta = GeneralUtils.generate_msgMeta(
# a.config[:externalservice][:text2textinstruct][:mqtttopic],
# senderName= "evaluator",
# senderId= a.id,
# receiverName= "text2textinstruct",
# mqttBroker= a.config[:mqttServerInfo][:broker],
# mqttBrokerPort= a.config[:mqttServerInfo][:port],
# )
# outgoingMsg = Dict(
# :msgMeta=> msgMeta,
# :payload=> Dict(
# :text=> prompt,
# :kwargs=> Dict(
# :max_tokens=> 512,
# :stop=> ["<|eot_id|>"],
# )
# )
# )
# for attempt in 1:5
# try
# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
# _responseJsonStr = response[:response][:text]
# expectedJsonExample =
# """
# Here is an expected JSON format:
# {"evaluation": "...", "score": "..."}
# """
# responseJsonStr = jsoncorrection(a, _responseJsonStr, expectedJsonExample)
# evaluationDict = copy(JSON3.read(responseJsonStr))
# # check if dict has all required value
# dummya::AbstractString = evaluationDict[:evaluation]
# dummyb::Integer = evaluationDict[:score]
# return (evaluationDict[:evaluation], evaluationDict[:score])
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("")
# @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
# println("")
# end
# end
# error("evaluator failed to generate an evaluation")
# end
"""
@@ -946,6 +710,7 @@ function conversation(a::T, userinput::Dict) where {T<:agent}
if isempty(a.plan[:currenttrajectory])
# initial state
a.plan[:currenttrajectory] = Dict{Symbol, Any}(
# deepcopy the info to prevent modifying the info unintentionally during MCTS planning
:customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
@@ -963,6 +728,9 @@ function conversation(a::T, userinput::Dict) where {T<:agent}
#[] :recap=>,
:question=> userinput[:text],
),
# store conversation for virtual customer because the virtual customer agent is just
# a function and stateless.
:virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
[Dict(:name=> "user", :text=> userinput[:text])]
),
@@ -1002,6 +770,141 @@ end
# """
# # Arguments
# - `a::T1`
# one of Yiem's agents
# - `state::T2`
# a game state
# # Return
# - `evaluation::Tuple{String, Integer}`
# evaluation and score
# # Example
# ```jldoctest
# julia>
# ```
# # TODO
# - [ ] update docs
# - [ ] implement the function
# # Signature
# """
# function comparer(a::T1, state::T2)::Tuple{String, Integer} where {T1<:agent, T2<:AbstractDict}
# _prompt =
# """
# Analyze the trajectories of a solution to a question answering task. The trajectories are
# labeled by environmental observations about the situation, thoughts that can reason about
# the current situation and actions that can be three types:
# 1) winestock[query], which you can use to find wine in your inventory.
# 2) chatbox[text], which you can use to interact with the user.
# 3) recommendbox[answer], which returns your wine recommendation to the user.
# Given a question and a trajectory, evaluate its correctness and provide your reasoning and
# analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
# can be correct if the thoughts and actions so far are correct, even if the answer is not found
# yet. Do not generate additional thoughts or actions. Then ending with the correctness score s
# where s is an integer from 0 to 10.
# You should only respond in JSON format as described below:
# {"evaluation": "your evaluation", "score": "your evaluation score"}
# Here are some examples:
# {
# "question": "I'm looking for a sedan with an automatic driving feature.",
# "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
# "thought_2": "But there is only 1 model that has the feature customer wanted.",
# "thought_3": "I should check our inventory first to see if we have it.",
# "action_1": {"name": "inventory", "input": "Yiem model A"},
# "observation_1": "Yiem model A is in stock."
# }
# {"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
# It is also better to have simple searches corresponding to a single entity, making this the best action.",
# "score": 10
# }
# {
# "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
# "thought_1": "Let me check our inventory first to see if I have it.",
# "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
# "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
# "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
# "action_2": {"name": "chatbox", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
# "observation_1": "This is not what I wanted."
# }
# {"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
# not a pen and a pencil separately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
# "score": 0
# }
# Let's begin!:
# $(JSON3.write(state[:thoughtHistory]))
# {"evaluation"
# """
# # apply LLM specific instruct format
# externalService = a.config[:externalservice][:text2textinstruct]
# llminfo = externalService[:llminfo]
# prompt =
# if llminfo[:name] == "llama3instruct"
# formatLLMtext_llama3instruct("system", _prompt)
# else
# error("llm model name is not defined yet $(@__LINE__)")
# end
# msgMeta = GeneralUtils.generate_msgMeta(
# a.config[:externalservice][:text2textinstruct][:mqtttopic],
# senderName= "evaluator",
# senderId= a.id,
# receiverName= "text2textinstruct",
# mqttBroker= a.config[:mqttServerInfo][:broker],
# mqttBrokerPort= a.config[:mqttServerInfo][:port],
# )
# outgoingMsg = Dict(
# :msgMeta=> msgMeta,
# :payload=> Dict(
# :text=> prompt,
# :kwargs=> Dict(
# :max_tokens=> 512,
# :stop=> ["<|eot_id|>"],
# )
# )
# )
# for attempt in 1:5
# try
# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
# _responseJsonStr = response[:response][:text]
# expectedJsonExample =
# """
# Here is an expected JSON format:
# {"evaluation": "...", "score": "..."}
# """
# responseJsonStr = jsoncorrection(a, _responseJsonStr, expectedJsonExample)
# evaluationDict = copy(JSON3.read(responseJsonStr))
# # check if dict has all required value
# dummya::AbstractString = evaluationDict[:evaluation]
# dummyb::Integer = evaluationDict[:score]
# return (evaluationDict[:evaluation], evaluationDict[:score])
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("")
# @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
# println("")
# end
# end
# error("evaluator failed to generate an evaluation")
# end