Compare commits
6 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 37ba3a9d31 | |
| | bfadd53033 | |
| | 8fc3afe348 | |
| | c60037226a | |
| | db6c9c5f2b | |
| | 6504099959 | |
Project.toml
@@ -1,7 +1,7 @@
 name = "YiemAgent"
 uuid = "e012c34b-7f78-48e0-971c-7abb83b6f0a2"
 authors = ["narawat lamaiin <narawat@outlook.com>"]
-version = "0.1.2"
+version = "0.1.3"

 [deps]
 DataFrames = "a93c6f00-e57d-5684-b7b6-d8193f3e46c0"
src/interface.jl (631 changed lines)
@@ -1,6 +1,6 @@
 module interface

-export addNewMessage, conversation, decisionMaker, evaluator, reflector, generatechat,
+export addNewMessage, conversation, decisionMaker, reflector, generatechat,
 generalconversation, detectWineryName, generateSituationReport

 using JSON3, DataStructures, Dates, UUIDs, HTTP, Random, PrettyPrinting, Serialization,
@@ -56,6 +56,8 @@ end
 - `state::T2`
 a game state

+# Keyword Arguments
+
 # Return
 - `thoughtDict::Dict`

@@ -90,8 +92,6 @@ julia> output_thoughtDict = Dict(

 # TODO
 - [] update docstring
 - [x] implement the function
 - [] implement RAG to pull similar experience
-- [] use customerinfo
-- [] user storeinfo

@@ -294,15 +294,13 @@ function decisionMaker(a::T; recent::Integer=5)::Dict{Symbol,Any} where {T<:agen
 Dict(:name => "user", :text => usermsg)
 ]

-# put in model format
-prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
-prompt *= """
-<|start_header_id|>assistant<|end_header_id|>
-"""
+# change qwen format put in model format
+prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")

 response = a.func[:text2textInstructLLM](prompt)
 response = GeneralUtils.remove_french_accents(response)
-response = replace(response, '*'=>"")
+response = replace(response, "**"=>"")
+response = replace(response, "***"=>"")
 response = replace(response, "<|eot_id|>"=>"")

 # check if response contain more than one functions from ["CHATBOX", "CHECKINVENTORY", "ENDCONVERSATION"]
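Context for the recurring `llama3instruct` to `qwen` switch in this commit: Qwen instruct models use a ChatML-style template rather than Llama 3's `<|start_header_id|>` headers, which is presumably why the manual assistant-header suffix is dropped. A minimal sketch of such a formatter (hypothetical; the actual `GeneralUtils.formatLLMtext` implementation is not part of this diff):

```julia
# Hypothetical ChatML/Qwen-style formatter. Assumes each turn is a
# Dict with :name (role) and :text, as in the _prompt vectors above.
function format_qwen(turns::Vector{<:AbstractDict})::String
    prompt = ""
    for t in turns
        # ChatML wraps every turn in <|im_start|>role ... <|im_end|>
        prompt *= "<|im_start|>$(t[:name])\n$(t[:text])<|im_end|>\n"
    end
    # leave the assistant turn open so the model writes the reply
    return prompt * "<|im_start|>assistant\n"
end
```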
@@ -318,9 +316,10 @@ function decisionMaker(a::T; recent::Integer=5)::Dict{Symbol,Any} where {T<:agen
 continue
 end

-responsedict = GeneralUtils.textToDict(response,
-["Understanding", "Reasoning", "Plan", "Action_name", "Action_input"],
-rightmarker=":", symbolkey=true, lowercasekey=true)
+header = ["Understanding:", "Reasoning:", "Plan:", "Action_name:", "Action_input:"]
+dictkey = ["understanding", "reasoning", "plan", "action_name", "action_input"]
+responsedict = GeneralUtils.textToDict(response, header;
+dictKey=dictkey, symbolkey=true)

 if responsedict[:action_name] ∉ ["CHATBOX", "CHECKINVENTORY", "ENDCONVERSATION"]
 errornote = "You must use the given functions"
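The other recurring refactor moves `GeneralUtils.textToDict` from a `rightmarker`/`lowercasekey` call style to explicit `header`/`dictKey` lists. A rough sketch of what the new contract appears to be (hypothetical reimplementation; the real GeneralUtils internals are not shown in this diff):

```julia
# Hypothetical sketch: split `text` into sections delimited by the given
# header strings (e.g. "Understanding:") and map each section's body to
# the corresponding dictKey. Assumes every header appears once, in order.
function textToDict_sketch(text::AbstractString, header::Vector{String};
                           dictKey::Vector{String}, symbolkey::Bool=true)
    d = Dict{Any,Any}()
    positions = [findfirst(h, text) for h in header]
    for (i, h) in enumerate(header)
        start = last(positions[i]) + 1
        stop = i < length(header) ? first(positions[i+1]) - 1 : lastindex(text)
        key = symbolkey ? Symbol(dictKey[i]) : dictKey[i]
        d[key] = strip(text[start:stop])
    end
    return d
end
```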
@@ -394,284 +393,280 @@ function decisionMaker(a::T; recent::Integer=5)::Dict{Symbol,Any} where {T<:agen
 end


-""" Assigns a scalar value to each new child node to be used for selec-
-tion and backpropagation. This value effectively quantifies the agent's progress in task completion,
-serving as a heuristic to steer the search algorithm towards the most promising regions of the tree.
+# """ Assigns a scalar value to each new child node to be used for selec-
+# tion and backpropagation. This value effectively quantifies the agent's progress in task completion,
+# serving as a heuristic to steer the search algorithm towards the most promising regions of the tree.

-# Arguments
-- `a::T1`
-one of Yiem's agent
-- `state::T2`
-a game state
+# # Arguments
+# - `a::T1`
+# one of Yiem's agent
+# - `state::T2`
+# a game state

-# Return
-- `evaluation::Tuple{String, Integer}`
-evaluation and score
+# # Return
+# - `evaluation::Tuple{String, Integer}`
+# evaluation and score

-# Example
-```jldoctest
-julia>
-```
+# # Example
+# ```jldoctest
+# julia>
+# ```

-# Signature
-"""
-function evaluator(config::T1, state::T2
-)::Tuple{String,Integer} where {T1<:AbstractDict,T2<:AbstractDict}
+# # Signature
+# """
+# function evaluator(config::T1, state::T2
+# )::Tuple{String,Integer} where {T1<:AbstractDict,T2<:AbstractDict}

-systemmsg =
-"""
-Analyze the trajectories of a solution to a question answering task. The trajectories are
-labeled by environmental observations about the situation, thoughts that can reason about
-the current situation and actions that can be three types:
-1) CHECKINVENTORY[query], which you can use to find wine in your inventory.
-2) CHATBOX[text], which you can use to interact with the user.
+# systemmsg =
+# """
+# Analyze the trajectories of a solution to a question answering task. The trajectories are
+# labeled by environmental observations about the situation, thoughts that can reason about
+# the current situation and actions that can be three types:
+# 1) CHECKINVENTORY[query], which you can use to find wine in your inventory.
+# 2) CHATBOX[text], which you can use to interact with the user.

-Given a question and a trajectory, evaluate its correctness and provide your reasoning and
-analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
-can be correct if the thoughts and actions so far are correct, even if the answer is not found
-yet. Do not generate additional thoughts or actions. Then ending with the correctness score s
-where s is an integer from 0 to 10.
+# Given a question and a trajectory, evaluate its correctness and provide your reasoning and
+# analysis in detail. Focus on the latest thought, action, and observation. Incomplete trajectories
+# can be correct if the thoughts and actions so far are correct, even if the answer is not found
+# yet. Do not generate additional thoughts or actions. Then ending with the correctness score s
+# where s is an integer from 0 to 10.

-You should only respond in JSON format as describe below:
-{"evaluation": "your evaluation", "score": "your evaluation score"}
+# You should only respond in JSON format as describe below:
+# {"evaluation": "your evaluation", "score": "your evaluation score"}

-Here are some examples:
-user:
-{
-"question": "I'm looking for a sedan with an automatic driving feature.",
-"thought_1": "I have many types of sedans in my inventory, each with diverse features.",
-"thought_2": "But there is only 1 model that has the feature customer wanted.",
-"thought_3": "I should check our inventory first to see if we have it.",
-"action_1": {"name": "inventory", "input": "Yiem model A"},
-"observation_1": "Yiem model A is in stock."
-}
-assistant
-{
-"evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
-It is also better to have simple searches corresponding to a single entity, making this the best action.",
-"score": 10
-}
+# Here are some examples:
+# user:
+# {
+# "question": "I'm looking for a sedan with an automatic driving feature.",
+# "thought_1": "I have many types of sedans in my inventory, each with diverse features.",
+# "thought_2": "But there is only 1 model that has the feature customer wanted.",
+# "thought_3": "I should check our inventory first to see if we have it.",
+# "action_1": {"name": "inventory", "input": "Yiem model A"},
+# "observation_1": "Yiem model A is in stock."
+# }
+# assistant
+# {
+# "evaluation": "This trajectory is correct as it is reasonable to check an inventory for info provided in the question.
+# It is also better to have simple searches corresponding to a single entity, making this the best action.",
+# "score": 10
+# }

-user:
-{
-"question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
-"thought_1": "Let me check our inventory first to see if I have it.",
-"action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
-"observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
-"thought_2": "Ok, I have what the user is asking. Let's tell the user.",
-"action_2": {"name": "CHATBOX", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
-"observation_1": "This is not what I wanted."
-}
-assistant:
-{
-"evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
-not a pen and a pencil seperately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
-"score": 0
-}
+# user:
+# {
+# "question": "Do you have an all-in-one pen with 4 colors and a pencil for sale?",
+# "thought_1": "Let me check our inventory first to see if I have it.",
+# "action_1": {"name": "inventory", "input": "pen with 4 color and a pencil."},
+# "observation_1": "I found {1: "Pilot Dr. grip 4-in-1 pen", 2: "Rotting pencil"}",
+# "thought_2": "Ok, I have what the user is asking. Let's tell the user.",
+# "action_2": {"name": "CHATBOX", "input": "Yes, we do have a Pilot Dr. grip 4-in-1 pen and a Rotting pencil"},
+# "observation_1": "This is not what I wanted."
+# }
+# assistant:
+# {
+# "evaluation": "This trajectory is incorrect as my search term should be related to a 4-colors pen with a pencil in it,
+# not a pen and a pencil seperately. A better search term should have been a 4-colors pen with a pencil, all-in-one.",
+# "score": 0
+# }

-Let's begin!
-"""
+# Let's begin!
+# """

-usermsg = """
-$(JSON3.write(state[:thoughtHistory]))
-"""
+# usermsg = """
+# $(JSON3.write(state[:thoughtHistory]))
+# """

-chathistory =
-[
-Dict(:name => "system", :text => systemmsg),
-Dict(:name => "user", :text => usermsg)
-]
+# chathistory =
+# [
+# Dict(:name => "system", :text => systemmsg),
+# Dict(:name => "user", :text => usermsg)
+# ]

-# put in model format
-prompt = formatLLMtext(chathistory, "llama3instruct")
-prompt *= """
-<|start_header_id|>assistant<|end_header_id|>
-{
-"""
+# # put in model format
+# prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")

-pprint(prompt)
-externalService = config[:externalservice][:text2textinstruct]
+# pprint(prompt)
+# externalService = config[:externalservice][:text2textinstruct]


-# apply LLM specific instruct format
-externalService = config[:externalservice][:text2textinstruct]
+# # apply LLM specific instruct format
+# externalService = config[:externalservice][:text2textinstruct]

-msgMeta = GeneralUtils.generate_msgMeta(
-externalService[:mqtttopic],
-senderName="evaluator",
-senderId=string(uuid4()),
-receiverName="text2textinstruct",
-mqttBroker=config[:mqttServerInfo][:broker],
-mqttBrokerPort=config[:mqttServerInfo][:port],
-)
+# msgMeta = GeneralUtils.generate_msgMeta(
+# externalService[:mqtttopic],
+# senderName="evaluator",
+# senderId=string(uuid4()),
+# receiverName="text2textinstruct",
+# mqttBroker=config[:mqttServerInfo][:broker],
+# mqttBrokerPort=config[:mqttServerInfo][:port],
+# )

-outgoingMsg = Dict(
-:msgMeta => msgMeta,
-:payload => Dict(
-:text => prompt,
-:kwargs => Dict(
-:max_tokens => 512,
-:stop => ["<|eot_id|>"],
-)
-)
-)
+# outgoingMsg = Dict(
+# :msgMeta => msgMeta,
+# :payload => Dict(
+# :text => prompt,
+# :kwargs => Dict(
+# :max_tokens => 512,
+# :stop => ["<|eot_id|>"],
+# )
+# )
+# )

-for attempt in 1:5
-try
-response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
-_responseJsonStr = response[:response][:text]
-expectedJsonExample = """
-Here is an expected JSON format:
-{"evaluation": "...", "score": "..."}
-"""
-responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
-evaluationDict = copy(JSON3.read(responseJsonStr))
+# for attempt in 1:5
+# try
+# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
+# _responseJsonStr = response[:response][:text]
+# expectedJsonExample = """
+# Here is an expected JSON format:
+# {"evaluation": "...", "score": "..."}
+# """
+# responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
+# evaluationDict = copy(JSON3.read(responseJsonStr))

-# check if dict has all required value
-dummya::AbstractString = evaluationDict[:evaluation]
-dummyb::Integer = evaluationDict[:score]
+# # check if dict has all required value
+# dummya::AbstractString = evaluationDict[:evaluation]
+# dummyb::Integer = evaluationDict[:score]

-return (evaluationDict[:evaluation], evaluationDict[:score])
-catch e
-io = IOBuffer()
-showerror(io, e)
-errorMsg = String(take!(io))
-st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
-println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
-end
-end
-error("evaluator failed to generate an evaluation")
-end
+# return (evaluationDict[:evaluation], evaluationDict[:score])
+# catch e
+# io = IOBuffer()
+# showerror(io, e)
+# errorMsg = String(take!(io))
+# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
+# println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
+# end
+# end
+# error("evaluator failed to generate an evaluation")
+# end


"""
|
||||
# """
|
||||
|
||||
# Arguments
|
||||
# # Arguments
|
||||
|
||||
# Return
|
||||
# # Return
|
||||
|
||||
# Example
|
||||
```jldoctest
|
||||
julia>
|
||||
```
|
||||
# # Example
|
||||
# ```jldoctest
|
||||
# julia>
|
||||
# ```
|
||||
|
||||
# TODO
|
||||
- [] update docstring
|
||||
- [x] implement the function
|
||||
- [x] add try block. check result that it is expected before returning
|
||||
# # TODO
|
||||
# - [] update docstring
|
||||
# - [x] implement the function
|
||||
# - [x] add try block. check result that it is expected before returning
|
||||
|
||||
# Signature
|
||||
"""
|
||||
function reflector(config::T1, state::T2)::String where {T1<:AbstractDict,T2<:AbstractDict}
|
||||
# https://github.com/andyz245/LanguageAgentTreeSearch/blob/main/hotpot/hotpot.py
|
||||
# # Signature
|
||||
# """
|
||||
# function reflector(config::T1, state::T2)::String where {T1<:AbstractDict,T2<:AbstractDict}
|
||||
# # https://github.com/andyz245/LanguageAgentTreeSearch/blob/main/hotpot/hotpot.py
|
||||
|
||||
_prompt =
|
||||
"""
|
||||
You are a helpful sommelier working for a wine store.
|
||||
Your goal is to recommend the best wine from your inventory that match the user preferences.
|
||||
You will be given a question and a trajectory of the previous help you've done for a user.
|
||||
You were unsuccessful in helping the user either because you guessed the wrong answer with Finish[answer], or you didn't know the user enough.
|
||||
In a few sentences, Diagnose a possible reason for failure and devise a new, concise, high level plan that aims to mitigate the same failure.
|
||||
Use complete sentences.
|
||||
# _prompt =
|
||||
# """
|
||||
# You are a helpful sommelier working for a wine store.
|
||||
# Your goal is to recommend the best wine from your inventory that match the user preferences.
|
||||
# You will be given a question and a trajectory of the previous help you've done for a user.
|
||||
# You were unsuccessful in helping the user either because you guessed the wrong answer with Finish[answer], or you didn't know the user enough.
|
||||
# In a few sentences, Diagnose a possible reason for failure and devise a new, concise, high level plan that aims to mitigate the same failure.
|
||||
# Use complete sentences.
|
||||
|
||||
You should only respond in JSON format as describe below:
|
||||
{"reflection": "your relection"}
|
||||
# You should only respond in JSON format as describe below:
|
||||
# {"reflection": "your relection"}
|
||||
|
||||
Here are some examples:
|
||||
Previous Trial:
|
||||
{
|
||||
"question": "Hello, I would like a get a bottle of wine",
|
||||
"thought_1": "A customer wants to buy a bottle of wine. Before making a recommendation, I need to know more about their preferences.",
|
||||
"action_1": {"name": "CHATBOX", "input": "What is the occasion for which you're buying this wine?"},
|
||||
"observation_1": "We are holding a wedding party",
|
||||
# Here are some examples:
|
||||
# Previous Trial:
|
||||
# {
|
||||
# "question": "Hello, I would like a get a bottle of wine",
|
||||
# "thought_1": "A customer wants to buy a bottle of wine. Before making a recommendation, I need to know more about their preferences.",
|
||||
# "action_1": {"name": "CHATBOX", "input": "What is the occasion for which you're buying this wine?"},
|
||||
# "observation_1": "We are holding a wedding party",
|
||||
|
||||
"thought_2": "A wedding party, that's a great occasion! The customer might be looking for a celebratory drink. Let me ask some more questions to narrow down the options.",
|
||||
"action_2": {"name": "CHATBOX", "input": "What type of food will you be serving at the wedding?"},
|
||||
"observation_2": "It will be Thai dishes.",
|
||||
# "thought_2": "A wedding party, that's a great occasion! The customer might be looking for a celebratory drink. Let me ask some more questions to narrow down the options.",
|
||||
# "action_2": {"name": "CHATBOX", "input": "What type of food will you be serving at the wedding?"},
|
||||
# "observation_2": "It will be Thai dishes.",
|
||||
|
||||
"thought_3": "With Thai food, I should recommend a wine that complements its spicy and savory flavors. And since it's a celebratory occasion, the customer might prefer a full-bodied wine.",
|
||||
"action_3": {"name": "CHATBOX", "input": "What is your budget for this bottle of wine?"},
|
||||
"observation_3": "I would spend up to 50 bucks.",
|
||||
# "thought_3": "With Thai food, I should recommend a wine that complements its spicy and savory flavors. And since it's a celebratory occasion, the customer might prefer a full-bodied wine.",
|
||||
# "action_3": {"name": "CHATBOX", "input": "What is your budget for this bottle of wine?"},
|
||||
# "observation_3": "I would spend up to 50 bucks.",
|
||||
|
||||
"thought_4": "Now that I have some more information, it's time to narrow down the options.",
|
||||
"action_4": {"name": "winestock", "input": "red wine with full body, pairs well with spicy food, budget \$50"},
|
||||
"observation_4": "I found the following wines in our stock: \n{\n 1: El Enemigo Cabernet Franc 2019\n2: Tantara Chardonnay 2017\n\n}\n",
|
||||
# "thought_4": "Now that I have some more information, it's time to narrow down the options.",
|
||||
# "action_4": {"name": "winestock", "input": "red wine with full body, pairs well with spicy food, budget \$50"},
|
||||
# "observation_4": "I found the following wines in our stock: \n{\n 1: El Enemigo Cabernet Franc 2019\n2: Tantara Chardonnay 2017\n\n}\n",
|
||||
|
||||
"thought_5": "Now that I have a list of potential wines, I need to know more about the customer's taste preferences.",
|
||||
"action_5": {"name": "CHATBOX", "input": "What type of wine characteristics are you looking for? (e.g. tannin level, sweetness, intensity, acidity)"},
|
||||
"observation_5": "I like full-bodied red wine with low tannin.",
|
||||
# "thought_5": "Now that I have a list of potential wines, I need to know more about the customer's taste preferences.",
|
||||
# "action_5": {"name": "CHATBOX", "input": "What type of wine characteristics are you looking for? (e.g. tannin level, sweetness, intensity, acidity)"},
|
||||
# "observation_5": "I like full-bodied red wine with low tannin.",
|
||||
|
||||
"thought_6": "Now that I have more information about the customer's preferences, it's time to make a recommendation.",
|
||||
"action_6": {"name": "recommendbox", "input": "El Enemigo Cabernet Franc 2019"},
|
||||
"observation_6": "I don't like the one you recommend. I want dry wine."
|
||||
}
|
||||
# "thought_6": "Now that I have more information about the customer's preferences, it's time to make a recommendation.",
|
||||
# "action_6": {"name": "recommendbox", "input": "El Enemigo Cabernet Franc 2019"},
|
||||
# "observation_6": "I don't like the one you recommend. I want dry wine."
|
||||
# }
|
||||
|
||||
{
|
||||
"reflection": "I asked the user about the occasion, food type, and budget, and then searched for wine in the inventory right away. However, I should have asked the user for the specific wine type and their preferences in order to gather more information before making a recommendation."
|
||||
}
|
||||
# {
|
||||
# "reflection": "I asked the user about the occasion, food type, and budget, and then searched for wine in the inventory right away. However, I should have asked the user for the specific wine type and their preferences in order to gather more information before making a recommendation."
|
||||
# }
|
||||
|
||||
Let's begin!
|
||||
# Let's begin!
|
||||
|
||||
Previous trial:
|
||||
$(JSON3.write(state[:thoughtHistory]))
|
||||
{"reflection"
|
||||
"""
|
||||
# Previous trial:
|
||||
# $(JSON3.write(state[:thoughtHistory]))
|
||||
# {"reflection"
|
||||
# """
|
||||
|
||||
# apply LLM specific instruct format
|
||||
externalService = config[:externalservice][:text2textinstruct]
|
||||
llminfo = externalService[:llminfo]
|
||||
prompt =
|
||||
if llminfo[:name] == "llama3instruct"
|
||||
formatLLMtext_llama3instruct("system", _prompt)
|
||||
else
|
||||
error("llm model name is not defied yet $(@__LINE__)")
|
||||
end
|
||||
# # apply LLM specific instruct format
|
||||
# externalService = config[:externalservice][:text2textinstruct]
|
||||
# llminfo = externalService[:llminfo]
|
||||
# prompt =
|
||||
# if llminfo[:name] == "llama3instruct"
|
||||
# formatLLMtext_llama3instruct("system", _prompt)
|
||||
# else
|
||||
# error("llm model name is not defied yet $(@__LINE__)")
|
||||
# end
|
||||
|
||||
msgMeta = GeneralUtils.generate_msgMeta(
|
||||
a.config[:externalservice][:text2textinstruct][:mqtttopic],
|
||||
senderName="reflector",
|
||||
senderId=string(uuid4()),
|
||||
receiverName="text2textinstruct",
|
||||
mqttBroker=config[:mqttServerInfo][:broker],
|
||||
mqttBrokerPort=config[:mqttServerInfo][:port],
|
||||
)
|
||||
# msgMeta = GeneralUtils.generate_msgMeta(
|
||||
# a.config[:externalservice][:text2textinstruct][:mqtttopic],
|
||||
# senderName="reflector",
|
||||
# senderId=string(uuid4()),
|
||||
# receiverName="text2textinstruct",
|
||||
# mqttBroker=config[:mqttServerInfo][:broker],
|
||||
# mqttBrokerPort=config[:mqttServerInfo][:port],
|
||||
# )
|
||||
|
||||
outgoingMsg = Dict(
|
||||
:msgMeta => msgMeta,
|
||||
:payload => Dict(
|
||||
:text => prompt,
|
||||
:kwargs => Dict(
|
||||
:max_tokens => 512,
|
||||
:stop => ["<|eot_id|>"],
|
||||
)
|
||||
)
|
||||
)
|
||||
# outgoingMsg = Dict(
|
||||
# :msgMeta => msgMeta,
|
||||
# :payload => Dict(
|
||||
# :text => prompt,
|
||||
# :kwargs => Dict(
|
||||
# :max_tokens => 512,
|
||||
# :stop => ["<|eot_id|>"],
|
||||
# )
|
||||
# )
|
||||
# )
|
||||
|
||||
for attempt in 1:5
|
||||
try
|
||||
response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
|
||||
_responseJsonStr = response[:response][:text]
|
||||
expectedJsonExample = """
|
||||
Here is an expected JSON format:
|
||||
{"reflection": "..."}
|
||||
"""
|
||||
responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
|
||||
reflectionDict = copy(JSON3.read(responseJsonStr))
|
||||
# for attempt in 1:5
|
||||
# try
|
||||
# response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
|
||||
# _responseJsonStr = response[:response][:text]
|
||||
# expectedJsonExample = """
|
||||
# Here is an expected JSON format:
|
||||
# {"reflection": "..."}
|
||||
# """
|
||||
# responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
|
||||
# reflectionDict = copy(JSON3.read(responseJsonStr))
|
||||
|
||||
# check if dict has all required value
|
||||
dummya::AbstractString = reflectionDict[:reflection]
|
||||
# # check if dict has all required value
|
||||
# dummya::AbstractString = reflectionDict[:reflection]
|
||||
|
||||
return reflectionDict[:reflection]
|
||||
catch e
|
||||
io = IOBuffer()
|
||||
showerror(io, e)
|
||||
errorMsg = String(take!(io))
|
||||
st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
|
||||
println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
|
||||
end
|
||||
end
|
||||
error("reflector failed to generate a thought")
|
||||
end
|
||||
# return reflectionDict[:reflection]
|
||||
# catch e
|
||||
# io = IOBuffer()
|
||||
# showerror(io, e)
|
||||
# errorMsg = String(take!(io))
|
||||
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
|
||||
# println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
|
||||
# end
|
||||
# end
|
||||
# error("reflector failed to generate a thought")
|
||||
# end
|
||||
|
||||
|
||||
""" Chat with llm.
|
||||
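A note on the retry loops above (in both the removed and the commented-out versions): they validate the parsed JSON by assigning into type-annotated locals, so a missing field or wrong type throws and the `catch` branch retries. A standalone illustration of that mechanism (names are illustrative, not from the repository):

```julia
using JSON3

# Illustrative only: typed local assignments double as a schema check;
# a missing key or a wrong type throws, which a caller's retry loop catches.
function validate(jsonstr::AbstractString)
    d = copy(JSON3.read(jsonstr))            # JSON3.Object -> mutable Dict
    dummya::AbstractString = d[:evaluation]  # throws unless string-like
    dummyb::Integer = d[:score]              # throws unless an integer
    return (dummya, dummyb)
end

validate("""{"evaluation": "looks right", "score": 7}""")  # ok
# validate("""{"evaluation": "hmm", "score": "high"}""")   # would throw
```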
@@ -863,46 +858,6 @@ function think(a::T)::NamedTuple{(:actionname, :result),Tuple{String,String}} wh
 )
 result = chatresponse

-# # store thoughtDict after the conversation finish
-# if a.memory[:events][end][:thought][:action_name] == "ENDCONVERSATION"
-# # generateSituationReport in the agent didn't include the last conversation
-# # so the function will be called here
-# a.memory[:recap] = generateSituationReport(a, a.func[:text2textInstructLLM]; skiprecent=0)
-
-# for (i, event) in enumerate(a.memory[:events])
-# if event[:subject] == "assistant"
-# # create timeline of the last 3 conversation except the last one.
-# # The former will be used as caching key and the latter will be the caching target
-# # in vector database
-# all_recapkeys = keys(a.memory[:recap]) # recap as caching
-# all_recapkeys_vec = [r for r in all_recapkeys] # convert to a vector
-
-# # select from 1 to 2nd-to-lase event (i.e. excluding the latest which is assistant's response)
-# _recapkeys_vec = all_recapkeys_vec[1:i-1]
-
-# # select only previous 3 recaps
-# recapkeys_vec =
-# if length(_recapkeys_vec) <= 3 # 1st message is a user's hello msg
-# _recapkeys_vec # choose all
-# else
-# _recapkeys_vec[end-2:end]
-# end
-# #[PENDING] if there is specific data such as number, donot store in database
-# tempmem = DataStructures.OrderedDict()
-# for k in recapkeys_vec
-# tempmem[k] = a.memory[:recap][k]
-# end
-
-# recap = GeneralUtils.dictToString_noKey(tempmem)
-# thoughtDict = a.memory[:events][i][:thought] # latest assistant thoughtDict
-# a.func[:insertSommelierDecision](recap, thoughtDict)
-# else
-# # skip
-# end
-# end
-# println("Caching conversation done")
-# end

 elseif actionname == "CHECKINVENTORY"
 if rawresponse !== nothing
 vd = GeneralUtils.dfToVectorDict(rawresponse)
@@ -999,6 +954,9 @@ function generatechat(a::sommelier, thoughtDict)
 Let's begin!
 """

+header = ["Chat:"]
+dictkey = ["chat"]
+
 # a.memory[:shortmem][:available_wine] is a vector of dictionary
 context =
 if length(a.memory[:shortmem][:available_wine]) != 0
@@ -1036,10 +994,7 @@ function generatechat(a::sommelier, thoughtDict)
 ]

 # put in model format
-prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
-prompt *= """
-<|start_header_id|>assistant<|end_header_id|>
-"""
+prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")

 try
 response = a.func[:text2textInstructLLM](prompt)
@@ -1054,22 +1009,27 @@ function generatechat(a::sommelier, thoughtDict)
 response = replace(response, '`' => "")
+response = replace(response, "<|eot_id|>"=>"")
 response = GeneralUtils.remove_french_accents(response)
-responsedict = GeneralUtils.textToDict(response, ["Chat"],
-rightmarker=":", symbolkey=true, lowercasekey=true)

-for i ∈ [:chat]
-if length(JSON3.write(responsedict[i])) == 0
-error("$i is empty ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
-end
+# check whether response has all header
+detected_kw = GeneralUtils.detect_keyword(header, response)
+if sum(values(detected_kw)) < length(header)
+errornote = "\nSQL decisionMaker() response does not have all header"
+continue
+elseif sum(values(detected_kw)) > length(header)
+errornote = "\nSQL decisionMaker() response has duplicated header"
+continue
+end

-# check if there are more than 1 key per categories
-for i ∈ [:chat]
-matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
-if length(matchkeys) > 1
-error("generatechat has more than one key per categories")
-end
-end
+responsedict = GeneralUtils.textToDict(response, header;
+dictKey=dictkey, symbolkey=true)

+# # check if there are more than 1 key per categories
+# for i ∈ Symbol.(dictkey)
+# matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
+# if length(matchkeys) > 1
+# error("generatechat has more than one key per categories")
+# end
+# end

 # check if Context: is in chat
 if occursin("Context:", responsedict[:chat])
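The `detect_keyword` guard added here (and in the `extractWineAttributes_*` hunks below) counts how many of the expected headers appear in the raw response before parsing: a sum below `length(header)` means a section is missing, above it means one is duplicated. A hypothetical sketch of that helper (the real `GeneralUtils.detect_keyword` is not shown in this diff):

```julia
# Hypothetical sketch: count occurrences of each header keyword in `text`.
# The caller compares sum(values(result)) against length(keywords).
function detect_keyword(keywords::Vector{String}, text::AbstractString)
    return Dict(kw => count(kw, text) for kw in keywords)
end

detect_keyword(["Chat:"], "Chat: Hello!\nChat: again")  # Dict("Chat:" => 2)
```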
@@ -1161,10 +1121,7 @@ function generatechat(a::companion)
 ]

 # put in model format
-prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
-prompt *= """
-<|start_header_id|>assistant<|end_header_id|>
-"""
+prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")

 response = a.text2textInstructLLM(prompt)

@@ -1333,13 +1290,36 @@ function generatequestion(a, text2textInstructLLM::Function; recent=nothing)::St
 ]

 # put in model format
-prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
-prompt *= """
-<|start_header_id|>assistant<|end_header_id|>
-"""
+prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")

 try
 response = text2textInstructLLM(prompt)
+# make sure generatequestion() don't have wine name that is not from retailer inventory
+# check whether an agent recommend wines before checking inventory or recommend wines
+# outside its inventory
+# ask LLM whether there are any winery mentioned in the response
+mentioned_winery = detectWineryName(a, response)
+if mentioned_winery != "None"
+mentioned_winery = String.(strip.(split(mentioned_winery, ",")))
+
+# check whether the wine is in event
+isWineInEvent = false
+for winename in mentioned_winery
+for event in a.memory[:events]
+if event[:outcome] !== nothing && occursin(winename, event[:outcome])
+isWineInEvent = true
+break
+end
+end
+end
+
+# if wine is mentioned but not in timeline or shortmem,
+# then the agent is not supposed to recommend the wine
+if isWineInEvent == false
+errornote = "Previously, You mentioned wines that is not in your inventory which is not allowed."
+error("Previously, You mentioned wines that is not in your inventory which is not allowed.")
+end
+end

 # sometime LLM generate more than 1 Understanding:
 understanding_number = count("Understanding:", response)
@@ -1359,9 +1339,10 @@ function generatequestion(a, text2textInstructLLM::Function; recent=nothing)::St
 error("no answer found in the response ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
 end

-responsedict = GeneralUtils.textToDict(response,
-["Understanding", "Q1"],
-rightmarker=":", symbolkey=true, lowercasekey=true)
+header = ["Understanding:", "Q1:"]
+dictkey = ["understanding", "q1"]
+responsedict = GeneralUtils.textToDict(response, header;
+dictKey=dictkey, symbolkey=true)
 response = "Q1: " * responsedict[:q1]
 println("\n~~~ generatequestion ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
 pprintln(response)
@@ -1412,6 +1393,9 @@ function generateSituationReport(a, text2textInstructLLM::Function; skiprecent::
 Let's begin!
 """

+header = ["Event_$i:" for i in eachindex(a.memory[:events])]
+dictkey = lowercase.(["Event_$i" for i in eachindex(a.memory[:events])])
+
 if length(a.memory[:events]) <= skiprecent
 return nothing
 end
@@ -1437,15 +1421,11 @@ function generateSituationReport(a, text2textInstructLLM::Function; skiprecent::
 ]

 # put in model format
-prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
-prompt *= """
-<|start_header_id|>assistant<|end_header_id|>
-"""
+prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")

 response = text2textInstructLLM(prompt)
-eventheader = ["Event_$i" for i in eachindex(a.memory[:events])]
-responsedict = GeneralUtils.textToDict(response, eventheader,
-rightmarker=":", symbolkey=true)
+responsedict = GeneralUtils.textToDict(response, header;
+dictKey=dictkey, symbolkey=true)

 println("\n~~~ generateSituationReport() ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
 pprintln(response)
@@ -1494,18 +1474,17 @@ function detectWineryName(a, text)
 ]

 # put in model format
-prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
-prompt *= """
-<|start_header_id|>assistant<|end_header_id|>
-"""
+prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")

 try
 response = a.func[:text2textInstructLLM](prompt)
 println("\n~~~ detectWineryName() ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
 pprintln(response)

-responsedict = GeneralUtils.textToDict(response, ["winery_names"],
-rightmarker=":", symbolkey=true, lowercasekey=true)
+header = ["Winery_names:"]
+dictkey = ["winery_names"]
+responsedict = GeneralUtils.textToDict(response, header;
+dictKey=dictkey, symbolkey=true)

 result = responsedict[:winery_names]

@@ -326,7 +326,7 @@ julia>

 # TODO
 - [] update docstring
-- [x] implement the function
+- implement the function

 # Signature
 """
@@ -336,31 +336,41 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
 """
 As a helpful sommelier, your task is to extract the user information from the user's query as much as possible to fill out user's preference form.

-At each round of conversation, the user will give you the current situation:
+At each round of conversation, the user will give you the following:
 User's query: ...

 You must follow the following guidelines:
-1) If specific information required in the preference form is not available in the query or there isn't any, mark with "NA" to indicate this.
+- If specific information required in the preference form is not available in the query or there isn't any, mark with "NA" to indicate this.
 Additionally, words like 'any' or 'unlimited' mean no information is available.
-2) Do not generate other comments.
+- Do not generate other comments.

-You should then respond to the user with the following points:
-- reasoning: state your understanding of the current situation
-- wine_name: name of the wine
-- winery: name of the winery
-- vintage: the year of the wine
-- region: a region (NOT a country) where the wine is produced, such as Burgundy, Napa Valley, etc
-- country: a country where the wine is produced. Can be "Austria", "Australia", "France", "Germany", "Italy", "Portugal", "Spain", "United States"
-- wine_type: can be one of: "red", "white", "sparkling", "rose", "dessert" or "fortified"
-- grape_varietal: the name of the primary grape used to make the wine
-- tasting_notes: a brief description of the wine's taste, such as "butter", "oak", "fruity", etc
-- wine_price: price range of wine.
-- occasion: the occasion the user is having the wine for
-- food_to_be_paired_with_wine: food that the user will be served with the wine such as poultry, fish, steak, etc
+You should then respond to the user with:
+Comprehension: state your understanding of the current situation
+Wine_name: name of the wine
+Winery: name of the winery
+Vintage: the year of the wine
+Region: a region (NOT a country) where the wine is produced, such as Burgundy, Napa Valley, etc
+Country: a country where the wine is produced. Can be "Austria", "Australia", "France", "Germany", "Italy", "Portugal", "Spain", "United States"
+Wine_type: can be one of: "red", "white", "sparkling", "rose", "dessert" or "fortified"
+Grape_varietal: the name of the primary grape used to make the wine
+Tasting_notes: a brief description of the wine's taste, such as "butter", "oak", "fruity", etc
+Wine_price: price range of wine.
+Occasion: the occasion the user is having the wine for
+Food_to_be_paired_with_wine: food that the user will be served with the wine such as poultry, fish, steak, etc


-You should only respond in the user's preference form (JSON) as described below:
-{"reasoning": ..., "winery": ..., "wine_name": ..., "vintage": ..., "region": ..., "country": ..., "wine_type": ..., "grape_varietal": ..., "tasting_notes": ..., "wine_price": ..., "occasion": ..., "food_to_be_paired_with_wine": ...}
+You should only respond in format as described below:
+Comprehension: ...
+Wine_name: ...
+Winery: ...
+Vintage: ...
+Region: ...
+Country: ...
+Wine_type:
+Grape_varietal: ...
+Tasting_notes: ...
+Wine_price: ...
+Occasion: ...
+Food_to_be_paired_with_wine: ...

 Here are some example:
 User's query: red, Chenin Blanc, Riesling, 20 USD
@@ -372,7 +382,8 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
 Let's begin!
 """

-attributes = ["reasoning", "winery", "wine_name", "vintage", "region", "country", "wine_type", "grape_varietal", "tasting_notes", "wine_price", "occasion", "food_to_be_paired_with_wine"]
+header = ["Comprehension:", "Wine_name:", "Winery:", "Vintage:", "Region:", "Country:", "Wine_type:", "Grape_varietal:", "Tasting_notes:", "Wine_price:", "Occasion:", "Food_to_be_paired_with_wine:"]
+dictkey = ["comprehension", "wine_name", "winery", "vintage", "region", "country", "wine_type", "grape_varietal", "tasting_notes", "wine_price", "occasion", "food_to_be_paired_with_wine"]
 errornote = ""

 for attempt in 1:5
@@ -389,18 +400,13 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
 ]

 # put in model format
-prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
-prompt *=
-"""
-<|start_header_id|>assistant<|end_header_id|>
-"""
-
+prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
 response = a.func[:text2textInstructLLM](prompt)
 response = GeneralUtils.remove_french_accents(response)

 # check wheter all attributes are in the response
 checkFlag = false
-for word in attributes
+for word in header
 if !occursin(word, response)
 errornote = "$word attribute is missing in previous attempts"
 println("Attempt $attempt $errornote ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
@@ -410,11 +416,19 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<
 end
 checkFlag == true ? continue : nothing

-responsedict = copy(JSON3.read(response))
+# check whether response has all header
+detected_kw = GeneralUtils.detect_keyword(header, response)
+if sum(values(detected_kw)) < length(header)
+errornote = "\nYiemAgent extractWineAttributes_1() response does not have all header"
+continue
+elseif sum(values(detected_kw)) > length(header)
+errornote = "\nYiemAgent extractWineAttributes_1() response has duplicated header"
+continue
+end
+responsedict = GeneralUtils.textToDict(response, header;
+dictKey=dictkey, symbolkey=true)

 # convert

-delete!(responsedict, :reasoning)
+delete!(responsedict, :comprehension)
 delete!(responsedict, :tasting_notes)
 delete!(responsedict, :occasion)
 delete!(responsedict, :food_to_be_paired_with_wine)
@@ -424,9 +438,9 @@ function extractWineAttributes_1(a::T1, input::T2)::String where {T1<:agent, T2<

 # check if winery, wine_name, region, country, wine_type, grape_varietal's value are in the query because sometime AI halucinates
 checkFlag = false
-for i in attributes
+for i in dictkey
 j = Symbol(i)
-if j ∉ [:reasoning, :tasting_notes, :occasion, :food_to_be_paired_with_wine]
+if j ∉ [:comprehension, :tasting_notes, :occasion, :food_to_be_paired_with_wine]
 # in case j is wine_price it needs to be checked differently because its value is ranged
 if j == :wine_price
 if responsedict[:wine_price] != "NA"
@@ -509,7 +523,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<

 conversiontable =
 """
-Conversion Table:
+<Conversion Table>
 Intensity level:
 1 to 2: May correspond to "light-bodied" or a similar description.
 2 to 3: May correspond to "med light bodied", "medium light" or a similar description.
@@ -534,6 +548,7 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
 3 to 4: May correspond to "medium acidity" or a similar description.
 4 to 5: May correspond to "semi high acidity" or a similar description.
 4 to 5: May correspond to "high acidity" or a similar description.
+</Conversion Table>
 """

 systemmsg =
@@ -547,67 +562,64 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
 The preference form requires the following information:
 sweetness, acidity, tannin, intensity

-You must follow the following guidelines:
+<You must follow the following guidelines>
 1) If specific information required in the preference form is not available in the query or there isn't any, mark with 'NA' to indicate this.
 Additionally, words like 'any' or 'unlimited' mean no information is available.
 2) Use the conversion table to convert the descriptive word level of sweetness, intensity, tannin, and acidity into a corresponding integer.
 3) Do not generate other comments.
+</You must follow the following guidelines>

-You should then respond to the user with the following points:
-- sweetness_keyword: The exact keywords in the user's query describing the sweetness level of the wine.
-- sweetness: ( S ), where ( S ) represents integers indicating the range of sweetness levels. Example: 1-2
-- acidity_keyword: The exact keywords in the user's query describing the acidity level of the wine.
-- acidity: ( A ), where ( A ) represents integers indicating the range of acidity level. Example: 3-5
-- tannin_keyword: The exact keywords in the user's query describing the tannin level of the wine.
-- tannin: ( T ), where ( T ) represents integers indicating the range of tannin level. Example: 1-3
-- intensity_keyword: The exact keywords in the user's query describing the intensity level of the wine.
-- intensity: ( I ), where ( I ) represents integers indicating the range of intensity level. Example: 2-4
+<You should then respond to the user with>
+Sweetness_keyword: The exact keywords in the user's query describing the sweetness level of the wine.
+Sweetness: ( S ), where ( S ) represents integers indicating the range of sweetness levels. Example: 1-2
+Acidity_keyword: The exact keywords in the user's query describing the acidity level of the wine.
+Acidity: ( A ), where ( A ) represents integers indicating the range of acidity level. Example: 3-5
+Tannin_keyword: The exact keywords in the user's query describing the tannin level of the wine.
+Tannin: ( T ), where ( T ) represents integers indicating the range of tannin level. Example: 1-3
+Intensity_keyword: The exact keywords in the user's query describing the intensity level of the wine.
+Intensity: ( I ), where ( I ) represents integers indicating the range of intensity level. Example: 2-4
+</You should then respond to the user with>

-You should only respond in the form (JSON) as described below:
-{
-"sweetness_keyword": ...,
-"sweetness": ...,
-"acidity_keyword": ...,
-"acidity": ...,
-"tannin_keyword": ...,
-"tannin": ...,
-"intensity_keyword": ...,
-"intensity": ...
-}
+<You should only respond in format as described below>
+Sweetness_keyword: ...
+Sweetness: ...
+Acidity_keyword: ...
+Acidity: ...
+Tannin_keyword: ...
+Tannin: ...
+Intensity_keyword: ...
+Intensity: ...
+</You should only respond in format as described below>

-Here are some examples:
+<Here are some examples>
 User's query: I want a wine with a medium-bodied, low acidity, medium tannin.
-{
-"sweetness_keyword": "NA",
-"sweetness": "NA",
-"acidity_keyword": "low acidity",
-"acidity": "1-2",
-"tannin_keyword": "medium tannin",
-"tannin": "3-4",
-"intensity_keyword": "medium-bodied",
-"intensity": "3-4"
-}
+Sweetness_keyword: NA
+Sweetness: NA
+Acidity_keyword: low acidity
+Acidity: 1-2
+Tannin_keyword: medium tannin
+Tannin: 3-4
+Intensity_keyword: medium-bodied
+Intensity: 3-4

 User's query: German red wine, under 100, pairs with spicy food
-{
-"sweetness_keyword": "NA",
-"sweetness": "NA",
-"acidity_keyword": "NA",
-"acidity": "NA",
-"tannin_keyword": "NA",
-"tannin": "NA",
-"intensity_keyword": "NA",
-"intensity": "NA"
-}
+Sweetness_keyword: NA
+Sweetness: NA
+Acidity_keyword: NA
+Acidity: NA
+Tannin_keyword: NA
+Tannin: NA
+Intensity_keyword: NA
+Intensity: NA
+</Here are some examples>

 Let's begin!
 """

+header = ["Sweetness_keyword:", "Sweetness:", "Acidity_keyword:", "Acidity:", "Tannin_keyword:", "Tannin:", "Intensity_keyword:", "Intensity:"]
+dictkey = ["sweetness_keyword", "sweetness", "acidity_keyword", "acidity", "tannin_keyword", "tannin", "intensity_keyword", "intensity"]
 errornote = ""

-for attempt in 1:5
+for attempt in 1:10
 usermsg =
 """
 $conversiontable
@@ -622,14 +634,22 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
 ]

 # put in model format
-prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
-prompt *=
-"""
-<|start_header_id|>assistant<|end_header_id|>
-"""
+prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")

 response = a.func[:text2textInstructLLM](prompt)
-responsedict = copy(JSON3.read(response))

+# check whether response has all header
+detected_kw = GeneralUtils.detect_keyword(header, response)
+if sum(values(detected_kw)) < length(header)
+errornote = "\nYiemAgent extractWineAttributes_2() response does not have all header"
+continue
+elseif sum(values(detected_kw)) > length(header)
+errornote = "\nYiemAgent extractWineAttributes_2() response has duplicated header"
+continue
+end
+
+responsedict = GeneralUtils.textToDict(response, header;
+dictKey=dictkey, symbolkey=true)

 # check whether each describing keyword is in the input to prevent halucination
 for i in ["sweetness", "acidity", "tannin", "intensity"]
@@ -753,8 +773,11 @@ function paraphrase(text2textInstructLLM::Function, text::String)
 response = replace(response, '$' => "USD")
 response = replace(response, '`' => "")
 response = GeneralUtils.remove_french_accents(response)
-responsedict = GeneralUtils.textToDict(response, ["Paraphrase"],
-rightmarker=":", symbolkey=true, lowercasekey=true)

+header = ["Paraphrase:"]
+dictkey = ["paraphrase"]
+responsedict = GeneralUtils.textToDict(response, header;
+dictKey=dictkey, symbolkey=true)

 for i ∈ [:paraphrase]
 if length(JSON3.write(responsedict[i])) == 0
@@ -31,6 +31,10 @@
 "description": "organization name"
 },
 "externalservice": {
+"loadbalancer": {
+"mqtttopic": "/loadbalancer/requestingservice",
+"description": "text to text service with instruct LLM"
+},
 "text2textinstruct": {
 "mqtttopic": "/loadbalancer/requestingservice",
 "description": "text to text service with instruct LLM",
@@ -51,6 +55,22 @@
 "llminfo": {
 "name": "llama3instruct"
 }
-}
+},
+"wineDB" : {
+"description": "A wine database connection info for LibPQ client",
+"host": "192.168.88.12",
+"port": 10201,
+"dbname": "wineDB",
+"user": "yiemtechnologies",
+"password": "yiemtechnologies@Postgres_0.0"
+},
+"SQLVectorDB" : {
+"description": "A wine database connection info for LibPQ client",
+"host": "192.168.88.12",
+"port": 10203,
+"dbname": "SQLVectorDB",
+"user": "yiemtechnologies",
+"password": "yiemtechnologies@Postgres_0.0"
+}
 }
 }
@@ -1,9 +0,0 @@
-using GeneralUtils
-
-response = "trajectory_evaluation:\nThe trajectory is correct so far. The thought accurately reflects the user's question, and the action taken is a valid attempt to retrieve data from the database that matches the specified criteria.\n\nanswer_evaluation:\nThe observation provides information about two red wines from Bordeaux rive droite in France, which partially answers the question. However, it does not provide a complete answer as it only lists the wine names and characteristics, but does not explicitly state whether there are any other wines that match the criteria.\n\naccepted_as_answer: No\n\nscore: 6\nThe trajectory is mostly correct, but the observation does not fully address the question.\n\nsuggestion: Consider adding more filters or parameters to the database query to retrieve a complete list of wines that match the specified criteria."
-
-responsedict = GeneralUtils.textToDict(response,
-["trajectory_evaluation", "answer_evaluation", "accepted_as_answer", "score", "suggestion"],
-rightmarker=":", symbolkey=true)
-
-
test/runtests.jl (new file, 0 changed lines)
@@ -8,31 +8,41 @@ using Base.Threads


 # load config
-config = JSON3.read("./test/config.json")
+config = JSON3.read("/appfolder/app/dev/YiemAgent/test/config.json")
+# config = copy(JSON3.read("../mountvolume/config.json"))


 function executeSQL(sql::T) where {T<:AbstractString}
-DBconnection = LibPQ.Connection("host=192.168.88.12 port=10201 dbname=wineDB user=yiemtechnologies password=yiemtechnologies@Postgres_0.0")
+host = config[:externalservice][:wineDB][:host]
+port = config[:externalservice][:wineDB][:port]
+dbname = config[:externalservice][:wineDB][:dbname]
+user = config[:externalservice][:wineDB][:user]
+password = config[:externalservice][:wineDB][:password]
+DBconnection = LibPQ.Connection("host=$host port=$port dbname=$dbname user=$user password=$password")
 result = LibPQ.execute(DBconnection, sql)
 close(DBconnection)
 return result
 end

 function executeSQLVectorDB(sql)
-DBconnection = LibPQ.Connection("host=192.168.88.12 port=10203 dbname=SQLVectorDB user=yiemtechnologies password=yiemtechnologies@Postgres_0.0")
+host = config[:externalservice][:SQLVectorDB][:host]
+port = config[:externalservice][:SQLVectorDB][:port]
+dbname = config[:externalservice][:SQLVectorDB][:dbname]
+user = config[:externalservice][:SQLVectorDB][:user]
+password = config[:externalservice][:SQLVectorDB][:password]
+DBconnection = LibPQ.Connection("host=$host port=$port dbname=$dbname user=$user password=$password")
 result = LibPQ.execute(DBconnection, sql)
 close(DBconnection)
 return result
 end

-function text2textInstructLLM(prompt::String)
+function text2textInstructLLM(prompt::String; maxattempt=3)
 msgMeta = GeneralUtils.generate_msgMeta(
-config[:externalservice][:text2textinstruct][:mqtttopic];
+config[:externalservice][:loadbalancer][:mqtttopic];
 msgPurpose="inference",
 senderName="yiemagent",
-senderId=string(uuid4()),
-receiverName="text2textinstruct",
+senderId=sessionId,
+receiverName="text2textinstruct_small",
 mqttBrokerAddress=config[:mqttServerInfo][:broker],
 mqttBrokerPort=config[:mqttServerInfo][:port],
 )
@@ -48,8 +58,20 @@ function text2textInstructLLM(prompt::String)
 )
 )

-_response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=6000)
-response = _response[:response][:text]
+response = nothing
+for attempts in 1:maxattempt
+_response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=300, maxattempt=maxattempt)
+payload = _response[:response]
+if _response[:success] && payload[:text] !== nothing
+response = _response[:response][:text]
+break
+else
+println("\n<text2textInstructLLM()> attempt $attempts/$maxattempt failed ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
+pprintln(outgoingMsg)
+println("</text2textInstructLLM()> attempt $attempts/$maxattempt failed ", @__FILE__, ":", @__LINE__, " $(Dates.now())\n")
+sleep(3)
+end
+end

 return response
 end
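A quick usage sketch of the reworked helper (illustrative; assumes the MQTT services configured in `config.json` are reachable):

```julia
# Illustrative call: retries up to 5 times and returns `nothing` if every
# attempt fails, so callers must guard against a nothing result.
answer = text2textInstructLLM("Say hello in one word."; maxattempt=5)
answer === nothing ? @warn("LLM service unreachable") : println(answer)
```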
@@ -57,11 +79,11 @@ end
 # get text embedding from a LLM service
 function getEmbedding(text::T) where {T<:AbstractString}
 msgMeta = GeneralUtils.generate_msgMeta(
-config[:externalservice][:text2textinstruct][:mqtttopic];
+config[:externalservice][:loadbalancer][:mqtttopic];
 msgPurpose="embedding",
 senderName="yiemagent",
-senderId=string(uuid4()),
-receiverName="text2textinstruct",
+senderId=sessionId,
+receiverName="text2textinstruct_small",
 mqttBrokerAddress=config[:mqttServerInfo][:broker],
 mqttBrokerPort=config[:mqttServerInfo][:port],
 )
@@ -72,7 +94,7 @@ function getEmbedding(text::T) where {T<:AbstractString}
 :text => [text] # must be a vector of string
 )
 )
-response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=6000)
+response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120)
 embedding = response[:response][:embeddings]
 return embedding
 end
@@ -80,10 +102,8 @@ end
 function findSimilarTextFromVectorDB(text::T1, tablename::T2, embeddingColumnName::T3,
 vectorDB::Function; limit::Integer=1
 )::DataFrame where {T1<:AbstractString, T2<:AbstractString, T3<:AbstractString}

 # get embedding from LLM service
 embedding = getEmbedding(text)[1]

-# check whether there is close enough vector already store in vectorDB. if no, add, else skip
-
 sql = """
 SELECT *, $embeddingColumnName <-> '$embedding' as distance
@@ -95,29 +115,29 @@ function findSimilarTextFromVectorDB(text::T1, tablename::T2, embeddingColumnNam
 return df
 end

 function similarSQLVectorDB(query; maxdistance::Integer=100)
 tablename = "sqlllm_decision_repository"
 # get embedding of the query
 df = findSimilarTextFromVectorDB(query, tablename,
 "function_input_embedding", executeSQLVectorDB)
 # println(df[1, [:id, :function_output]])
 row, col = size(df)
 distance = row == 0 ? Inf : df[1, :distance]
 # distance = 100 # CHANGE this is for testing only
 if row != 0 && distance < maxdistance
 # if there is usable SQL, return it.
 output_b64 = df[1, :function_output_base64] # pick the closest match
 output_str = String(base64decode(output_b64))
 rowid = df[1, :id]
-println("\n~~~ found similar sql. row id $rowid, distance $distance ", @__FILE__, " ", @__LINE__)
+println("\n~~~ found similar sql. row id $rowid, distance $distance ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
 return (dict=output_str, distance=distance)
 else
-println("\n~~~ similar sql not found, max distance $maxdistance ", @__FILE__, " ", @__LINE__)
+println("\n~~~ similar sql not found, max distance $maxdistance ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
 return (dict=nothing, distance=nothing)
 end
 end


-function insertSQLVectorDB(query::T1, SQL::T2; maxdistance::Integer=1) where {T1<:AbstractString, T2<:AbstractString}
+function insertSQLVectorDB(query::T1, SQL::T2; maxdistance::Integer=3) where {T1<:AbstractString, T2<:AbstractString}
 tablename = "sqlllm_decision_repository"
 # get embedding of the query
 # query = state[:thoughtHistory][:question]
@@ -134,8 +154,8 @@ function insertSQLVectorDB(query::T1, SQL::T2; maxdistance::Integer=3) where {T1
 sql = """
 INSERT INTO $tablename (function_input, function_output, function_output_base64, function_input_embedding) VALUES ('$query', '$sql_', '$sql_base64', '$query_embedding');
 """
-println("\n~~~ added new decision to vectorDB ", @__FILE__, " ", @__LINE__)
-println(sql)
+# println("\n~~~ added new decision to vectorDB ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
+# println(sql)
 _ = executeSQLVectorDB(sql)
 end
 end
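For reference, the cached SQL is stored both as plain text and base64-encoded (`function_output_base64`), and `similarSQLVectorDB` decodes the latter on retrieval with `base64decode`. A minimal round-trip of that encoding (standard library only):

```julia
using Base64

# Round-trip matching the storage/retrieval pattern above: encode before
# the INSERT, decode after the SELECT. Base64 presumably sidesteps quoting
# issues when arbitrary SQL text is embedded inside another SQL literal.
sql_text = "SELECT * FROM wines WHERE country = 'Germany';"
sql_base64 = base64encode(sql_text)            # stored column value
restored = String(base64decode(sql_base64))    # what the lookup returns
@assert restored == sql_text
```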