This commit is contained in:
narawat lamaiin
2024-06-05 09:26:48 +07:00
parent bfc6c234ae
commit f0cceee043
3 changed files with 279 additions and 265 deletions

View File

@@ -46,11 +46,12 @@ macro executeStringFunction(functionStr, args...)
function_to_call(args...)
end
""" Think and choose action
# Arguments
- `a::T1`
one of Yiem's agents
- `config::T1`
config
- `state::T2`
a game state
@@ -59,6 +60,23 @@ end
# Example
```jldoctest
julia> config = Dict(
:mqttServerInfo => Dict(
:description => "mqtt server info",
:port => 1883,
:broker => "mqtt.yiem.cc"
),
:externalservice => Dict(
:text2textinstruct => Dict(
:mqtttopic => "/loadbalancer/requestingservice",
:description => "text to text service with instruct LLM",
:llminfo => Dict(
:name => "llama3instruct"
)
),
)
)
julia> output_thoughtDict = Dict(
:thought_1 => "The customer wants to buy a bottle of wine. This is a good start!",
:action_1 => Dict{Symbol, Any}(
@@ -70,7 +88,7 @@ julia> output_thoughtDict = Dict(
```
# TODO
- [x] update docstring
- [ ] update docstring
- [x] implement the function
- [ ] implement RAG to pull similar experience
- [ ] use customerinfo
@@ -398,260 +416,6 @@ function evaluator(config::T1, state::T2
end
error("evaluator failed to generate an evaluation")
end
# function evaluator(a::T1, state::T2)::Tuple{String, Integer} where {T1<:agent, T2<:AbstractDict}
# _prompt =
# """
# Analyze the trajectories of a solution to a question answering task. The trajectories are
# labeled by environmental observations about the situation, thoughts that can reason about
# the current situation and actions that can be three types:
# 1) winestock[query], which you can use to find wine in your inventory.
# 2) chatbox[text], which you can use to interact with the user.
# 3) recommendbox[answer], which returns your wine recommendation to the user.
# Given a question and a trajectory, evaluate its correctness and provide your reasoning and
# analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
# can be correct if the thoughts and actions so far are correct, even if the answer is not found
# yet. Do not generate additional thoughts or actions. Then end with the correctness score s
# where s is an integer from 0 to 10.
# You should only respond in JSON format as described below:
# {"evaluation": "your evaluation", "score": "your evaluation score"}
# Here are some examples:
# {
# "question": "I'm looking for a sedan with an automatic driving feature.",
# "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
# "thought_2": "But there is only 1 model that has the feature customer wanted.",
# "thought_3": "I should check our inventory first to see if we have it.",
# "action_1": {"name": "inventory", "input": "Yiem model A"},
# "observation_1": "Yiem model A is in stock."
# }
# {"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
# It is also better to have simple searches corresponding to a single entity, making this the best action.",
# "score": 10
# }
# {
# "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
# "thought_1": "Let me check our inventory first to see if I have it.",
# "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
# "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
# "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
# "action_2": {"name": "chatbox", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
# "observation_1": "This is not what I wanted."
# }
# {"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
# not a pen and a pencil separately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
# "score": 0
# }
# Let's begin!:
# $(JSON3.write(state[:thoughtHistory]))
# {"evaluation"
# """
# # apply LLM specific instruct format
# externalService = a.config[:externalservice][:text2textinstruct]
# llminfo = externalService[:llminfo]
# prompt =
# if llminfo[:name] == "llama3instruct"
# formatLLMtext_llama3instruct("system", _prompt)
# else
# error("llm model name is not defined yet $(@__LINE__)")
# end
# msgMeta = GeneralUtils.generate_msgMeta(
# a.config[:externalservice][:text2textinstruct][:mqtttopic],
# senderName= "evaluator",
# senderId= a.id,
# receiverName= "text2textinstruct",
# mqttBroker= a.config[:mqttServerInfo][:broker],
# mqttBrokerPort= a.config[:mqttServerInfo][:port],
# )
# outgoingMsg = Dict(
# :msgMeta=> msgMeta,
# :payload=> Dict(
# :text=> prompt,
# :kwargs=> Dict(
# :max_tokens=> 512,
# :stop=> ["<|eot_id|>"],
# )
# )
# )
# for attempt in 1:5
# try
# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
# _responseJsonStr = response[:response][:text]
# expectedJsonExample =
# """
# Here is an expected JSON format:
# {"evaluation": "...", "score": "..."}
# """
# responseJsonStr = jsoncorrection(a, _responseJsonStr, expectedJsonExample)
# evaluationDict = copy(JSON3.read(responseJsonStr))
# # check if dict has all required value
# dummya::AbstractString = evaluationDict[:evaluation]
# dummyb::Integer = evaluationDict[:score]
# return (evaluationDict[:evaluation], evaluationDict[:score])
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("")
# @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
# println("")
# end
# end
# error("evaluator failed to generate an evaluation")
# end
# """
# # Arguments
# - `a::T1`
# one of Yiem's agents
# - `state::T2`
# a game state
# # Return
# - `evaluation::Tuple{String, Integer}`
# evaluation and score
# # Example
# ```jldoctest
# julia>
# ```
# # TODO
# - [] update docs
# - [] implement the function
# # Signature
# """
# function comparer(a::T1, state::T2)::Tuple{String, Integer} where {T1<:agent, T2<:AbstractDict}
# _prompt =
# """
# Analyze the trajectories of a solution to a question answering task. The trajectories are
# labeled by environmental observations about the situation, thoughts that can reason about
# the current situation and actions that can be three types:
# 1) winestock[query], which you can use to find wine in your inventory.
# 2) chatbox[text], which you can use to interact with the user.
# 3) recommendbox[answer], which returns your wine recommendation to the user.
# Given a question and a trajectory, evaluate its correctness and provide your reasoning and
# analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
# can be correct if the thoughts and actions so far are correct, even if the answer is not found
# yet. Do not generate additional thoughts or actions. Then end with the correctness score s
# where s is an integer from 0 to 10.
# You should only respond in JSON format as described below:
# {"evaluation": "your evaluation", "score": "your evaluation score"}
# Here are some examples:
# {
# "question": "I'm looking for a sedan with an automatic driving feature.",
# "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
# "thought_2": "But there is only 1 model that has the feature customer wanted.",
# "thought_3": "I should check our inventory first to see if we have it.",
# "action_1": {"name": "inventory", "input": "Yiem model A"},
# "observation_1": "Yiem model A is in stock."
# }
# {"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
# It is also better to have simple searches corresponding to a single entity, making this the best action.",
# "score": 10
# }
# {
# "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
# "thought_1": "Let me check our inventory first to see if I have it.",
# "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
# "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
# "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
# "action_2": {"name": "chatbox", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
# "observation_1": "This is not what I wanted."
# }
# {"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
# not a pen and a pencil separately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
# "score": 0
# }
# Let's begin!:
# $(JSON3.write(state[:thoughtHistory]))
# {"evaluation"
# """
# # apply LLM specific instruct format
# externalService = a.config[:externalservice][:text2textinstruct]
# llminfo = externalService[:llminfo]
# prompt =
# if llminfo[:name] == "llama3instruct"
# formatLLMtext_llama3instruct("system", _prompt)
# else
# error("llm model name is not defined yet $(@__LINE__)")
# end
# msgMeta = GeneralUtils.generate_msgMeta(
# a.config[:externalservice][:text2textinstruct][:mqtttopic],
# senderName= "evaluator",
# senderId= a.id,
# receiverName= "text2textinstruct",
# mqttBroker= a.config[:mqttServerInfo][:broker],
# mqttBrokerPort= a.config[:mqttServerInfo][:port],
# )
# outgoingMsg = Dict(
# :msgMeta=> msgMeta,
# :payload=> Dict(
# :text=> prompt,
# :kwargs=> Dict(
# :max_tokens=> 512,
# :stop=> ["<|eot_id|>"],
# )
# )
# )
# for attempt in 1:5
# try
# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
# _responseJsonStr = response[:response][:text]
# expectedJsonExample =
# """
# Here is an expected JSON format:
# {"evaluation": "...", "score": "..."}
# """
# responseJsonStr = jsoncorrection(a, _responseJsonStr, expectedJsonExample)
# evaluationDict = copy(JSON3.read(responseJsonStr))
# # check if dict has all required value
# dummya::AbstractString = evaluationDict[:evaluation]
# dummyb::Integer = evaluationDict[:score]
# return (evaluationDict[:evaluation], evaluationDict[:score])
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("")
# @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
# println("")
# end
# end
# error("evaluator failed to generate an evaluation")
# end
"""
@@ -946,6 +710,7 @@ function conversation(a::T, userinput::Dict) where {T<:agent}
if isempty(a.plan[:currenttrajectory])
# initial state
a.plan[:currenttrajectory] = Dict{Symbol, Any}(
# deepcopy the info to prevent modifying the info unintentionally during MCTS planning
:customerinfo=> deepcopy(a.keywordinfo[:customerinfo]),
@@ -963,6 +728,9 @@ function conversation(a::T, userinput::Dict) where {T<:agent}
#[] :recap=>,
:question=> userinput[:text],
),
# store conversation for virtual customer because the virtual customer agent is just
# a function and stateless.
:virtualCustomerChatHistory=> Vector{Dict{Symbol, Any}}(
[Dict(:name=> "user", :text=> userinput[:text])]
),
@@ -1002,6 +770,141 @@ end
# """
# # Arguments
# - `a::T1`
# one of Yiem's agents
# - `state::T2`
# a game state
# # Return
# - `evaluation::Tuple{String, Integer}`
# evaluation and score
# # Example
# ```jldoctest
# julia>
# ```
# # TODO
# - [] update docs
# - [] implement the function
# # Signature
# """
# function comparer(a::T1, state::T2)::Tuple{String, Integer} where {T1<:agent, T2<:AbstractDict}
# _prompt =
# """
# Analyze the trajectories of a solution to a question answering task. The trajectories are
# labeled by environmental observations about the situation, thoughts that can reason about
# the current situation and actions that can be three types:
# 1) winestock[query], which you can use to find wine in your inventory.
# 2) chatbox[text], which you can use to interact with the user.
# 3) recommendbox[answer], which returns your wine recommendation to the user.
# Given a question and a trajectory, evaluate its correctness and provide your reasoning and
# analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
# can be correct if the thoughts and actions so far are correct, even if the answer is not found
# yet. Do not generate additional thoughts or actions. Then end with the correctness score s
# where s is an integer from 0 to 10.
# You should only respond in JSON format as described below:
# {"evaluation": "your evaluation", "score": "your evaluation score"}
# Here are some examples:
# {
# "question": "I'm looking for a sedan with an automatic driving feature.",
# "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
# "thought_2": "But there is only 1 model that has the feature customer wanted.",
# "thought_3": "I should check our inventory first to see if we have it.",
# "action_1": {"name": "inventory", "input": "Yiem model A"},
# "observation_1": "Yiem model A is in stock."
# }
# {"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
# It is also better to have simple searches corresponding to a single entity, making this the best action.",
# "score": 10
# }
# {
# "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
# "thought_1": "Let me check our inventory first to see if I have it.",
# "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
# "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
# "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
# "action_2": {"name": "chatbox", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
# "observation_1": "This is not what I wanted."
# }
# {"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
# not a pen and a pencil separately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
# "score": 0
# }
# Let's begin!:
# $(JSON3.write(state[:thoughtHistory]))
# {"evaluation"
# """
# # apply LLM specific instruct format
# externalService = a.config[:externalservice][:text2textinstruct]
# llminfo = externalService[:llminfo]
# prompt =
# if llminfo[:name] == "llama3instruct"
# formatLLMtext_llama3instruct("system", _prompt)
# else
# error("llm model name is not defined yet $(@__LINE__)")
# end
# msgMeta = GeneralUtils.generate_msgMeta(
# a.config[:externalservice][:text2textinstruct][:mqtttopic],
# senderName= "evaluator",
# senderId= a.id,
# receiverName= "text2textinstruct",
# mqttBroker= a.config[:mqttServerInfo][:broker],
# mqttBrokerPort= a.config[:mqttServerInfo][:port],
# )
# outgoingMsg = Dict(
# :msgMeta=> msgMeta,
# :payload=> Dict(
# :text=> prompt,
# :kwargs=> Dict(
# :max_tokens=> 512,
# :stop=> ["<|eot_id|>"],
# )
# )
# )
# for attempt in 1:5
# try
# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
# _responseJsonStr = response[:response][:text]
# expectedJsonExample =
# """
# Here is an expected JSON format:
# {"evaluation": "...", "score": "..."}
# """
# responseJsonStr = jsoncorrection(a, _responseJsonStr, expectedJsonExample)
# evaluationDict = copy(JSON3.read(responseJsonStr))
# # check if dict has all required value
# dummya::AbstractString = evaluationDict[:evaluation]
# dummyb::Integer = evaluationDict[:score]
# return (evaluationDict[:evaluation], evaluationDict[:score])
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("")
# @warn "Attempt $attempt. Error occurred: $errorMsg\n$st"
# println("")
# end
# end
# error("evaluator failed to generate an evaluation")
# end

119
test/prompttest_2.jl Normal file
View File

@@ -0,0 +1,119 @@
using Revise
using YiemAgent, GeneralUtils, JSON3, DataStructures, LibPQ
using SQLLLM
# _prompt =
# """
# You are a helpful assistant.
# answer the following question:
# From the following CSV text:
# "{\"tabledescription\":[\"The customer table stores information about customers. It includes details such as first name, last name, display name, username, password, gender, country, telephone number, email, birthdate, additional_search_term, other attributes (in JSON format) and a description.\",\"The wine table stores information about different wines. It includes details namely id, name, brand, manufacturer, region, country, wine_type, grape_variety, serving_temperature, intensity, sweetness, tannin, acidity, fizziness, additional_search_term, other attributes (in JSON format) and a description.\",\"The wine_food table represents the association between wines and food items. It estab" ⋯ 477 bytes ⋯ "ed to retailer names, usernames, passwords, addresses, contact persons, telephone numbers, email addresses, additional_search_term, other attributes (in JSON format) and a description.\",\"The retailer_wine table represents the relationship between retailers and wines. It stores information about the wines available from which retailers, including vintage, their price, and the currency.\",\"The retailer_food table represents the relationship between retailers and food items. It stores information about the food items available from which retailers, including their price and the currency.\"],\"tablename\":[\"customer\",\"wine\",\"wine_food\",\"food\",\"retailer\",\"retailer_wine\",\"retailer_food\"]}"
# What is the description of table wine?
# """
# prompt = YiemAgent.formatLLMtext_llama3instruct("system", _prompt)
# @show prompt
# msgMeta = Dict(:requestResponse => nothing,
# :msgPurpose => nothing,
# :receiverId => nothing,
# :getPost => nothing,
# :msgId => "4c7111e0-c30e-44c3-8f85-1c8b3f03a8be",
# :acknowledgestatus => nothing,
# :replyToMsgId => nothing,
# :msgFormatVersion => nothing,
# :mqttServerInfo => Dict(:port => 1883, :broker => "mqtt.yiem.cc"),
# :sendTopic => "/loadbalancer/requestingservice",
# :receiverName => "text2textinstruct",
# :replyTopic => nothing,
# :senderName => "decisionMaker",
# :senderSelfnote => nothing,
# :senderId => "testingSessionID",
# :timeStamp => "2024-05-04T08:06:23.561"
# )
# outgoingMsg = Dict(
# :msgMeta=> msgMeta,
# :payload=> Dict(
# :text=> prompt,
# )
# )
# _response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
# result = _response[:response][:text]
DBconnection = LibPQ.Connection("host=192.168.88.12 port=5432 dbname=yiem_wine_assistant user=yiem password=yiem@Postgres_0.0")
tableinfo, df1, df2, df3 = SQLLLM.tableinfo(DBconnection, "wine")
_prompt =
"""
You are a helpful assistant helping to answer user question from a database table.
$tableinfo
Are there any chardonnay?
"""
prompt = YiemAgent.formatLLMtext_llama3instruct("system", _prompt)
@show prompt
msgMeta = Dict(:requestResponse => nothing,
:msgPurpose => nothing,
:receiverId => nothing,
:getPost => nothing,
:msgId => "4c7111e0-c30e-44c3-8f85-1c8b3f03a8be",
:acknowledgestatus => nothing,
:replyToMsgId => nothing,
:msgFormatVersion => nothing,
:mqttServerInfo => Dict(:port => 1883, :broker => "mqtt.yiem.cc"),
:sendTopic => "/loadbalancer/requestingservice",
:receiverName => "text2textinstruct",
:replyTopic => nothing,
:senderName => "decisionMaker",
:senderSelfnote => nothing,
:senderId => "testingSessionID",
:timeStamp => "2024-05-04T08:06:23.561"
)
outgoingMsg = Dict(
:msgMeta=> msgMeta,
:payload=> Dict(
:text=> prompt,
)
)
_response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
result2 = _response[:response][:text]

View File

@@ -86,14 +86,6 @@ response = YiemAgent.winestock(a, dummyinput)
"""
Write me SQL command to search the database for wines that fits: {"wine_type": "red","sweetness": 2,"acidity": 3,"tannin": 1,"intensity": 5}
"""
mctsparam = Dict{Symbol, Any}()