This commit is contained in:
2023-12-18 12:03:58 +00:00
parent 4c8f021d40
commit 3f17e10009
4 changed files with 150 additions and 122 deletions

View File

@@ -187,7 +187,7 @@ function planner_mistral_openorca(a::agentReflex)
- wine price range: ask the user
- ambient temperature at the serving location: ask the user
- wines we have in stock
You provide a personalized recommendation of up to two wines based on the user's info above, and you describe the benefits of each wine in detail.
You provide a personalized recommendation of wine based on the user's info above by describing the benefits of each wine in detail.
You have access to the following tools:
$toollines
@@ -196,8 +196,8 @@ function planner_mistral_openorca(a::agentReflex)
Objective: the objective you intend to do
Aware: ask yourself what info you don't have?
Plan: first you should always think about the objective, the info you have, the info you don't have and thoroughly then extract and devise a complete, step by step plan (pay attention to correct numeral calculation and commonsense).
p.s.1 each step of the plan should be a single action.
p.s.2 ask the user all you need to know and then search your inventory.
P.S.1 each step of the plan should be a single action.
P.S.2 ask the user all you need to know and then search your inventory.
<|im_end|>
$conversation
@@ -205,15 +205,16 @@ function planner_mistral_openorca(a::agentReflex)
Objective:
"""
#WORKING remove () in steps, as the LLM sometimes uses parentheses for (additional info), which interferes with updatePlan
result = sendReceivePrompt(a, assistant_plan_prompt, max_tokens=512, temperature=0.1)
result = sendReceivePrompt(a, assistant_plan_prompt, max_tokens=512, temperature=0.1)
x = split(result, "<|im_end|>")[1]
@show x
x = split(x, "Step")[1]
x = split(x, "Plan:")
objective = x[1]
plan = x[2]
x = split(result, "<|im_end|>")[1]
@show x
x = split(x, "Step")[1]
x = split(x, "Plan:")
objective = x[1]
plan = x[2]
return objective, plan
end
@@ -221,7 +222,7 @@ end
""" Update the current plan.
"""
function updatePlan(a::agentReflex)
conversation = messagesToString_nomark(a.messages)
# conversation = messagesToString_nomark(a.messages)
toollines = ""
for (toolname, v) in a.tools
@@ -238,10 +239,10 @@ function updatePlan(a::agentReflex)
<|im_start|>system
$(a.roles[a.role])
The required info you need for wine recommendation:
- wine price range: ask the user
- user's personal taste of wine: ask the user
- type of food: ask the user
- occasion: ask the user
- user's personal taste of wine: ask the user
- wine price range: ask the user
- ambient temperature at the serving location: ask the user
- wines we have in stock
You provide a personalized recommendation of up to two wines based on the user's info above, and you describe the benefits of each wine in detail.
@@ -249,19 +250,22 @@ function updatePlan(a::agentReflex)
You have access to the following tools:
$toollines
Your talk with the user:
$conversation
Your work:
$work
Your job is to update the plan if the info is available from your conversation with the user or your work.
Your job is to update the plan using available info from your work.
P.S. do not update if no info available.
For example:
Plan: 1. Ask the user for their food type.
Obs: It will be Thai dishes.
Updated plan: 1. Ask the user for their food type (Thai dishes).
<|im_end|>
New plan:
Updated plan:
"""
result = sendReceivePrompt(a, prompt, max_tokens=512, temperature=0.1)
@show updatedPlan = result
a.memory[:shortterm]["Plan 0:"] = result
end
@@ -303,13 +307,13 @@ function actor_mistral_openorca(a::agentReflex)
shorttermMemory = dictToString(a.memory[:shortterm], skiplist=["user:"])
conversation = messagesToString_nomark(a.messages, addressAIas="I")
# conversation = messagesToString_nomark(a.messages, addressAIas="I")
context =
"""
Your talk with the user:
$conversation
"""
# context =
# """
# Your talk with the user:
# $conversation
# """
prompt =
"""
@@ -318,7 +322,6 @@ function actor_mistral_openorca(a::agentReflex)
You have access to the following tools:
$toollines
$(a.thinkingFormat[:actor])
$context
<|im_end|>
$shorttermMemory
Thought $(a.step):
@@ -326,9 +329,75 @@ function actor_mistral_openorca(a::agentReflex)
prompt = replace(prompt, "{toolnames}" => toolnames)
prompt = replace(prompt, "{step}" => a.step)
response = nothing
chunkedtext = nothing
tempcounter = 0.0
while true # while Thought is empty, run actor again
tempcounter += 0.1
@show tempcounter
response = sendReceivePrompt(a, prompt, temperature=tempcounter)
response = splittext(response, ["Obs", "<|im_end|>"])
return prompt
if !occursin("Thought", response)
response = "Thought: " * response
end
headerToDetect = ["Question:", "Plan:", "Thought:",
"Act:", "Actinput:", "Obs:", "...",
"Answer:", "Conclusion:", "Summary:"]
# replace headers with versions carrying the correct attempt and step number
response = replaceHeaders(response, headerToDetect, a.step)
headers = detectCharacters(response, headerToDetect)
println("")
response_actor = response
@show response_actor
headerToDetect = ["Plan $(a.attempt):",
"Thought $(a.step):",
"Act $(a.step):",
"Actinput $(a.step):",
"Obs $(a.step):",
"Check $(a.step):",]
headers = detectCharacters(response, headerToDetect)
chunkedtext = chunktext(response, headers)
@show chunkedtext
# assuming a length of more than 10 characters means the LLM produced valid thinking
if length(chunkedtext["Thought $(a.step):"]) > 10
break
end
end
toolname = toolNameBeingCalled(chunkedtext["Act $(a.step):"], a.tools)
toolinput = chunkedtext["Actinput $(a.step):"]
# change trailing number to continue a.memory[:shortterm]
_latest_step = keys(a.memory[:shortterm])
_latest_step = [i for i in _latest_step]
_latest_step = _latest_step[end]
latest_step = parse(Int, _latest_step[end-2:end-1])
headerToDetect = ["Question:", "Plan:", "Thought:",
"Act:", "Actinput:", "Obs:", "...",
"Answer:", "Conclusion:", "Summary:"]
response = replaceHeaders(response, headerToDetect, latest_step+1)
headerToDetect = ["Plan $(a.attempt):",
"Thought $latest_step:",
"Act $latest_step:",
"Actinput $latest_step:",
"Obs $latest_step:",
"Check $latest_step:",]
headers = detectCharacters(response, headerToDetect)
chunkedtext = chunktext(response, headers)
# add to memory
addShortMem!(a.memory[:shortterm], chunkedtext)
a.memory[:log] = addShortMem!(a.memory[:log], chunkedtext)
error(1)
return toolname, toolinput
end
@@ -406,6 +475,13 @@ function work(a::agentReflex)
workstate = nothing
response = nothing
# user answering LLM -> Obs
if haskey(a.memory[:shortterm], "Act $(a.step):")
if occursin("chatbox", a.memory[:shortterm]["Act $(a.step):"])
a.memory[:shortterm]["Obs $(a.step):"] = a.messages[end][:content]
end
end
while true # Work loop
objective = nothing
@@ -414,19 +490,28 @@ function work(a::agentReflex)
toolname = nothing
toolinput = nothing
if length(a.memory[:shortterm]) != 0
updatePlan(a)
@show updatedPlan = a.memory[:shortterm]["Plan 0:"]
else
objective, plan = planner_mistral_openorca(a)
a.memory[:shortterm]["Plan $(a.attempt):"] = plan
a.memory[:log]["Plan $(a.attempt):"] = plan
# if length(a.memory[:shortterm]) != 0
# updatePlan(a)
# @show updatedPlan = a.memory[:shortterm]["Plan 0:"]
# else
# objective, plan = planner_mistral_openorca(a)
# a.memory[:shortterm]["Objective:"] = objective
# a.memory[:shortterm]["Plan $(a.attempt):"] = plan
# a.memory[:log]["Plan $(a.attempt):"] = plan
println("")
@show objective
@show plan
end
# println("")
# @show objective
# @show plan
# end
objective, plan = planner_mistral_openorca(a)
a.memory[:shortterm]["Objective:"] = objective
a.memory[:shortterm]["Plan $(a.attempt):"] = plan
a.memory[:log]["Plan $(a.attempt):"] = plan
println("")
@show objective
@show plan
# sometimes the LLM adds unneeded words I don't want
@@ -461,7 +546,7 @@ function work(a::agentReflex)
a.memory[:log]["response $(a.attempt):"] = response
# evaluate. if score > 6/10 good enough.
guideline = writeEvaluationGuideline(a, objective)
guideline = writeEvaluationGuideline(a)
println("")
@show guideline
@@ -529,13 +614,8 @@ end
actorState = "chatbox"
msgToUser = "message from assistant to user"
""" #WORKING add Obs from Chatbox
"""
function actor(a::agentReflex)
if haskey(a.memory[:shortterm], "Act $(a.step):")
if a.memory[:shortterm]["Act $(a.step):"] == "chatbox"
a.memory[:shortterm]["Obs $(a.step):"] = a.messages[end][:content]
end
end
actorState = nothing
msgToUser = nothing
@@ -563,72 +643,25 @@ function actor(a::agentReflex)
@show a.step
if a.step < totalsteps -1 # the last step of the plan is responding, let work() do this part
if a.step < totalsteps # the last step of the plan is responding, let work() do this part
# check whether LLM already complete the current step
iscomplete = checkStepCompletion(a)
@show iscomplete
toolname, toolinput = actor_mistral_openorca(a)
@show toolname
@show toolinput
if iscomplete == false
prompt_actor = actor_mistral_openorca(a)
println("")
@show prompt_actor
response = sendReceivePrompt(a, prompt_actor)
response = splittext(response, ["Obs", "<|im_end|>"])
if !occursin("Thought", response)
response = "Thought: " * response
end
headerToDetect = ["Question:", "Plan:", "Thought:",
"Act:", "Actinput:", "Obs:", "...",
"Answer:", "Conclusion:", "Summary:"]
# replace headers with versions carrying the correct attempt and step number
response = replaceHeaders(response, headerToDetect, a.step)
headers = detectCharacters(response, headerToDetect)
println("")
response_actor = response
@show response_actor
headerToDetect = ["Plan $(a.attempt):",
"Thought $(a.step):",
"Act $(a.step):",
"Actinput $(a.step):",
"Obs $(a.step):",
"Check $(a.step):",]
headers = detectCharacters(response, headerToDetect)
chunkedtext = chunktext(response, headers)
# add to memory
a.memory[:shortterm] = addShortMem!(a.memory[:shortterm], chunkedtext)
a.memory[:log] = addShortMem!(a.memory[:log], chunkedtext)
toolname = toolNameBeingCalled(chunkedtext["Act $(a.step):"], a.tools)
@show toolname
if toolname == "chatbox" # chat with user
msgToUser = chunkedtext["Actinput $(a.step):"]
actorState = toolname
break
elseif toolname == "N/A"
a.step += 1
else # function call
f = a.tools[toolname][:func]
toolinput = chunkedtext["Actinput $(a.step):"]
toolresult = f(a, toolinput)
@show toolresult
a.memory[:shortterm]["Obs $(a.step):"] = toolresult
a.memory[:log]["Obs $(a.step):"] = toolresult
end
else
# already complete this step, go to the next step
if toolname == "chatbox" # chat with user
msgToUser = toolinput
msgToUser = split(msgToUser, "\n\n")[1]
actorState = toolname
break
elseif toolname == "noaction"
a.step += 1
else # function call
f = a.tools[toolname][:func]
toolresult = f(a, toolinput)
@show toolresult
a.memory[:shortterm]["Obs $(a.step):"] = toolresult
a.memory[:log]["Obs $(a.step):"] = toolresult
end
else
actorState = "all steps done"
@@ -661,7 +694,7 @@ julia> usermsg = "What's AMD latest product?"
julia> evaluationGuideLine = writeEvaluationGuideline(agent, usermsg)
```
"""
function writeEvaluationGuideline(a::agentReflex, usermsg::T) where {T<:AbstractString}
function writeEvaluationGuideline(a::agentReflex)
prompt =
"""
<|im_start|>system
@@ -670,7 +703,7 @@ function writeEvaluationGuideline(a::agentReflex, usermsg::T) where {T<:Abstrac
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
Your work:
$usermsg
$(a.memory[:shortterm]["Objective:"])
Your job are:
1. Write an evaluation guideline for your work in order to be able to evaluate your response.

View File

@@ -79,18 +79,13 @@ function winestock(a::agentReflex, phrase::T) where {T<:AbstractString}
"""
1. Name: Louis Latou - Corton-Charlamagne - Chardonnay,
Description: Corton-Charlemagne 2018 is a powerful, complex wine. Its nose is intense, with notes of white stone fruits such as white peach, fresh hazelnut, vanilla, and almond paste. The wine is full-bodied for the palate, and the vanilla is complemented by aromas of fresh almond and lime blossom. The experience ends with a very fine aromatic aftertaste that has subtle saline notes.,
Price: 49 dollars,
Price: 55 dollars,
ID: ws-114
2. Name: Chateau de Beaucastel Hommage Jacques Perrin Chateauneuf-du-Pape,
Year: 2019,
Description: The quintessence of Château de Beaucastel, Hommage à Jacques Perrin delights us every year, and the 2019 vintage is no exception. To the eye it offers a splendid deep red color, verging on black. Full of power and supremely elegant, the nose is of magnificent aromatic complexity with notes of black fruit and spices that offer all the characteristic expression of Mourvèdre. Perfectly balanced by an incredible freshness, the mouth is eminently elegant with intense and complex aromas of great subtlety, a full, refined texture, subtle tannins of great finesse, and infinite length. A great classic Hommage à Jacques Perrin.,
Price: 42,
Price: 40,
ID: ed-23
3. Name: M. Chapoutier Ermitage l'Ermite Blanc,
Year: 2017
Description: Brilliant pale yellow. Complex aromas of vanilla, almonds, dried fruits and linden-tree. The mineraliaty is marked (typical of soil). Very round and rich wine. An elegant balance, of very ripe white fruit aromas (peach and apricot) and light notes of minerality. Beautiful length and complexity.,
Price: 13,
ID: wwr-259
"""
return result
end

View File

@@ -162,12 +162,12 @@ function agentReflex(
Use the following format:
Stimulus: the input user gives to you and you must respond
Plan: first you should always think about the stimulus, the info you need and the info you have thoroughly then extract and devise a step by step plan (pay attention to correct numeral calculation and commonsense).
p.s.1 each step should be a single action.
P.S.1 each step should be a single action.
""",
:actor=>
"""
Use the following format:
Thought: you should always think about do you have all the required info and what to do according to step {step} of the plan and the info you have (pay attention to correct numeral calculation and commonsense).
Thought: you should always think about how to carry out step {step} of the plan (pay attention to correct numeral calculation and commonsense).
Act: the action to take that match your thought, should be one of [{toolnames}]
Actinput: your input to the action you chose (pay attention to the tool's input)
Obs: the result of the action

View File

@@ -406,7 +406,7 @@ function isUseTools(a::agentReflex)
response = sendReceivePrompt(a, prompt, temperature=0.0)
response = split(response, "<|im_end|>")[1]
for (toolname, v) in a.tools
if occursin(toolname, response)
if occursin(toolname, String(response))
isusetool = true
break
end
@@ -867,6 +867,7 @@ end
Arguments:
text = a text you want to split
headers = a list of keywords you want to add step and substep to
step = a number you want to add
Return:
a leftmost text after split
@@ -884,11 +885,10 @@ function replaceHeaders(text::T, headers, step::Int) where {T<:AbstractString}
header = i[1:end-1] # not include ":"
if occursin(header, newtext)
startind = findfirst(header, newtext)[1]
stopind = findnext(":", newtext, startind+1)[end]
stopind = findnext(":", newtext, startind+1)[end] #BUG MethodError: no method matching lastindex(::Nothing)
word = newtext[startind: stopind]
newword = "$header $step:"
newtext = replace(newtext, word=> newword)
end
end