This commit is contained in:
narawat lamaiin
2024-08-09 18:14:51 +07:00
parent debb7004d6
commit b918b96a2d

View File

@@ -209,7 +209,7 @@ function decisionMaker(a::T)::Dict{Symbol, Any} where {T<:agent}
# Let's begin!
# """
# QandA = generatequestion(a, text2textInstructLLM)
QandA = generatequestion(a, a.text2textInstructLLM)
systemmsg =
"""
@@ -224,6 +224,7 @@ function decisionMaker(a::T)::Dict{Symbol, Any} where {T<:agent}
You MUST follow the following guidelines:
- Generally speaking, your inventory has some wines from France, the United States, Australia, Spain, and Italy, but you won't know which wines your store carries until you check your inventory.
- All wines in your inventory are always in stock.
- Use the "understand-then-check" inventory strategy to understand the user, as there are many wines in the inventory.
- Do not ask the user about the wine's flavor, e.g. floral, citrusy, nutty, or something similar.
- After the user has chosen the wine, congratulate the user and end the conversation politely. Don't offer any extra services.
@@ -272,7 +273,7 @@ function decisionMaker(a::T)::Dict{Symbol, Any} where {T<:agent}
for attempt in 1:10
usermsg =
"""
Your conversation with the user: $chathistory)
Your conversation with the user: $chathistory
$context
$errornote
"""
@@ -1272,7 +1273,7 @@ function generatechat(memory::Dict, chathistory::Vector, text2textInstructLLM::F
for attempt in 1:5
usermsg =
"""
Your conversation with the user: $chathistory)
Your conversation with the user: $chathistory
$context
Your thoughts: $(memory[:CHATBOX])
$errornote
@@ -1346,21 +1347,23 @@ function generatequestion(a, text2textInstructLLM::Function)::String
systemmsg =
"""
You are a helpful assistant that generate multiple questions about the current situation.
You are a helpful assistant acting as a polite, website-based sommelier for an online wine store.
Your task is to question the current situation.
At each round of conversation, you will be given the current situation:
User query: What's the user preferences about wine?
Your work progress: ...
Your conversation with the user: ...
Context: ...
You must follow the following guidelines:
1) Ask at least three questions but no more than five.
2) Your question should be specific, self-contained and not require any additional context.
3) Do not generate any question or comments at the end.
1) Ask at least two questions but no more than five.
2) Your question must be specific to helping the user based on the current situation.
3) Your question should be specific, self-contained and not require any additional context.
4) Do not generate any question or comments at the end.
You should then respond to the user with:
- Reasoning: State your detailed reasoning of the current situation
- Q: Your question
- A: Your answer to the question.
- A: Your answer to the question. Try to answer as best as you can.
You must only respond in format as described below:
@@ -1376,38 +1379,41 @@ function generatequestion(a, text2textInstructLLM::Function)::String
Let's begin!
"""
workprogress = ""
for (k, v) in state[:thoughtHistory]
if k [:query]
workprogress *= "$k: $v\n"
end
context =
if length(a.memory[:shortmem]) > 0
vectorOfDictToText(a.memory[:shortmem], withkey=false)
else
""
end
usermsg =
"""
$(context[:tablelist])
User query: $(state[:thoughtHistory][:question])
Your work progress: $workprogress
"""
_prompt =
[
Dict(:name=> "system", :text=> systemmsg),
Dict(:name=> "user", :text=> usermsg)
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
prompt *=
"""
<|start_header_id|>assistant<|end_header_id|>
"""
chathistory = vectorOfDictToText(a.chathistory)
errornote = ""
response = nothing # store for show when error msg show up
for attempt in 1:10
usermsg =
"""
Your conversation with the user: $chathistory
$context
$errornote
"""
_prompt =
[
Dict(:name=> "system", :text=> systemmsg),
Dict(:name=> "user", :text=> usermsg)
]
# put in model format
prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
prompt *=
"""
<|start_header_id|>assistant<|end_header_id|>
"""
try
response = text2textInstructLLM(prompt)
q_number = count("Q ", response)
if q_number < 3
if q_number < 1
error("too few questions only $q_number questions are generated ", @__FILE__, " ", @__LINE__)
end
println("--> generatequestion ", @__FILE__, " ", @__LINE__)