update
This commit is contained in:
102
src/interface.jl
102
src/interface.jl
@@ -320,21 +320,57 @@ function reflector()
|
||||
"""
|
||||
You are an advanced reasoning agent that can improve based on self reflection.
|
||||
You will be given the information related to the previous help you've done for a user.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
You were unsuccessful in helping the user either because you guessed the wrong answer with Finish[<answer>], or you used up your set number of reasoning steps.
|
||||
|
||||
|
||||
|
||||
You were unsuccessful in helping the user either because you guessed the wrong answer with Finish[answer], or you used up your set number of reasoning steps.
|
||||
In a few sentences, diagnose a possible reason for the failure and devise a new, concise, high-level plan that aims to mitigate the same failure.
|
||||
Use complete sentences.
|
||||
|
||||
Here are some examples:
|
||||
Previous Trial:
|
||||
{
|
||||
"question": "Hello, I would like a get a bottle of wine",
|
||||
"thought_1": "A customer wants to buy a bottle of wine. Before making a recommendation, I need to know more about their preferences.",
|
||||
"action_1": {"name": "chatbox", "input": "What is the occasion for which you're buying this wine?"},
|
||||
"observation_1": "We are holding a wedding party",
|
||||
|
||||
"thought_2": "A wedding party, that's a great occasion! The customer might be looking for a celebratory drink. Let me ask some more questions to narrow down the options.",
|
||||
"action_2": {"name": "chatbox", "input": "What type of food will you be serving at the wedding?"},
|
||||
"observation_2": "It will be Thai dishes.",
|
||||
|
||||
"thought_3": "With Thai food, I should recommend a wine that complements its spicy and savory flavors. And since it's a celebratory occasion, the customer might prefer a full-bodied wine.",
|
||||
"action_3": {"name": "chatbox", "input": "What is your budget for this bottle of wine?"},
|
||||
"observation_3": "I would spend up to 50 bucks.",
|
||||
|
||||
"thought_4": "Now that I have some more information, it's time to narrow down the options.",
|
||||
"action_4": {"name": "winestock", "input": "red wine with full body, pairs well with spicy food, budget \$50"},
|
||||
"observation_4": "I found the following wines in our stock: \n{\n 1: El Enemigo Cabernet Franc 2019\n2: Tantara Chardonnay 2017\n\n}\n",
|
||||
|
||||
"thought_5": "Now that I have a list of potential wines, I need to know more about the customer's taste preferences.",
|
||||
"action_5": {"name": "chatbox", "input": "What type of wine characteristics are you looking for? (e.g. tannin level, sweetness, intensity, acidity)"},
|
||||
"observation_5": "I like full-bodied Red wine with low tannin.",
|
||||
|
||||
"thought_6": "Now that I have more information about the customer's preferences, it's time to make a recommendation.",
|
||||
"action_6": {"name": "recommendbox", "input": "El Enemigo Cabernet Franc 2019"},
|
||||
"observation_6": "I don't like the one you recommend. I want dry wine."
|
||||
}
|
||||
Reflection: I searched one of the prime ministers involved in the signing, then attempted to answer right away. I should have searched each of the prime ministers, then looked up 'death' on each of their pages in order to get more information before answering.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
Question: The Rome Protocols were signed by three Prime Ministers one of which was assassinated as part of what?
|
||||
Thought 1: I need to search Rome Protocols, find the three Prime Ministers, then find what they were assassinated as part of.
|
||||
Action 1: Search[Rome Protocols]
|
||||
@@ -386,6 +422,52 @@ function reflector()
|
||||
Previous trial:
|
||||
{trajectory}Reflection:
|
||||
"""
|
||||
|
||||
# apply LLM specific instruct format
|
||||
externalService = a.config[:externalservice][:text2textinstruct]
|
||||
llminfo = externalService[:llminfo]
|
||||
prompt =
|
||||
if llminfo[:name] == "llama3instruct"
|
||||
formatLLMtext_llama3instruct("system", _prompt)
|
||||
else
|
||||
error("llm model name is not defied yet $(@__LINE__)")
|
||||
end
|
||||
|
||||
msgMeta = GeneralUtils.generate_msgMeta(
|
||||
a.config[:externalservice][:text2textinstruct][:mqtttopic],
|
||||
senderName= "progressValueEstimator",
|
||||
senderId= a.id,
|
||||
receiverName= "text2textinstruct",
|
||||
mqttBroker= a.config[:mqttServerInfo][:broker],
|
||||
mqttBrokerPort= a.config[:mqttServerInfo][:port],
|
||||
)
|
||||
|
||||
outgoingMsg = Dict(
|
||||
:msgMeta=> msgMeta,
|
||||
:payload=> Dict(
|
||||
:text=> prompt,
|
||||
:kwargs=> Dict(
|
||||
:max_tokens=> 512,
|
||||
:stop=> ["<|eot_id|>"],
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
_response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
|
||||
_thoughtJsonStr = _response[:response][:text]
|
||||
|
||||
# expectedJsonExample =
|
||||
# """
|
||||
# Here is an expected JSON format:
|
||||
# {"evaluation": "...", "score": "..."}
|
||||
# """
|
||||
# resultJsonStr = jsoncorrection(a, _thoughtJsonStr, expectedJsonExample)
|
||||
# resultDict = copy(JSON3.read(resultJsonStr))
|
||||
|
||||
# return resultDict[:evaluation], resultDict[:score]
|
||||
|
||||
|
||||
return _thoughtJsonStr
|
||||
end
|
||||
|
||||
|
||||
|
||||
Reference in New Issue
Block a user