This commit is contained in:
2023-12-01 12:18:04 +00:00
parent 3e3fa6c789
commit 6fe84e0cb1
4 changed files with 170 additions and 73 deletions

View File

@@ -288,20 +288,20 @@ function planner_mistral_openorca(a::agentReflex)
{usermsg}
<|im_end|>
<|im_start|>assistant
Plan $(a.attempt):
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner])
roleSpecificKnowledge =
"""
You provide a personalized recommendation of up to two wines based on the user's preference, and you describe the benefits of each wine in detail.
Info you need from the user to be able to help them selecting their best wine:
- type of food
- occasion
- user's personal taste of wine
- wine price range
- temperature at the serving location
- wine we have in stock
- wines we have in stock
You job is to provide a personalized recommendation of up to two wines based on the user's info above, and you describe the benefits of each wine in detail.
"""
prompt = replace(prompt, "{roleSpecificKnowledge}" => roleSpecificKnowledge)
toolnames = ""
@@ -333,6 +333,7 @@ function planner_mistral_openorca(a::agentReflex)
return prompt
end
function actor_mistral_openorca(a::agentReflex)
"""
general prompt format:
@@ -370,7 +371,6 @@ function actor_mistral_openorca(a::agentReflex)
{context}
<|im_end|>
{shorttermMemory}
Thought $(a.step):
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
@@ -403,7 +403,7 @@ function actor_mistral_openorca(a::agentReflex)
{longterm memory}
{plan}
"""
# context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "")
context = replace(context, "{longterm memory}" => "")
context = replace(context, "{plan}" => "My plan:\n$(a.memory[:shortterm]["Plan 1:"])")
@@ -633,10 +633,12 @@ function work(a::agentReflex, usermsg::String)
_ = addNewMessage(a, "user", usermsg)
a.memory[:shortterm]["user:"] = usermsg
a.memory[:log]["user:"] = usermsg
a.newplan = true
a.attempt = 0
elseif a.thinkingmode == :continue_thinking #TODO
error("continue_thinking $(@__LINE__)")
println("continue_thinking!!")
_ = addNewMessage(a, "user", usermsg)
a.memory[:shortterm]["Obs $(a.step):"] = usermsg
a.memory[:shortterm]["Obs $(a.step)-$(a.substep):"] = usermsg
a.memory[:log]["Obs $(a.step):"] = usermsg
else
error("undefined condition thinkingmode = $thinkingmode $(@__LINE__)")
@@ -651,20 +653,25 @@ function work(a::agentReflex, usermsg::String)
toolname = nothing
toolinput = nothing
prompt = planner_mistral_openorca(a)
@show prompt
respond = sendReceivePrompt(a, prompt)
if a.newplan == true
prompt_plan = planner_mistral_openorca(a)
println("")
@show prompt_plan
respond = sendReceivePrompt(a, prompt_plan, max_tokens=1024)
# sometimes the LLM adds unneeded words I don't want
# plan = split(respond, "<|im_end|>")[1]
# plan = split(plan, "Response:")[1]
# plan = split(plan, "Execution:")[1]
# plan = split(plan, "Result:")[1]
# plan = split(plan, "Recommendation:")[1]
plan = splittext(respond, ["<|im_end|>", "Response:", "Execution:", "Result:", "Recommendation:"])
plan = replace(plan, "Plan:"=>"")
a.memory[:shortterm]["Plan $(a.attempt):"] = plan
a.memory[:log]["Plan $(a.attempt):"] = plan
# sometimes the LLM adds unneeded words I don't want
plan = splittext(respond, ["<|im_end|>", "Response", "Execution",
"Result", "Recommendation"])
# plan = replace(plan, "Plan:"=>"")
println("")
@show plan
a.memory[:shortterm]["Plan $(a.attempt):"] = plan
a.memory[:log]["Plan $(a.attempt):"] = plan
a.step = 0
a.substep = 0
a.newplan = false
end
actorstate, msgToUser = actor(a)
if actorstate == "chatbox"
@@ -701,7 +708,9 @@ function work(a::agentReflex, usermsg::String)
chunkedtext = chunktext(lessonwithcontext, headers)
@show chunkedtext
a.memory[:longterm][chunkedtext["Context:"] => chunkedtext["Lesson:"]]
a.newplan = true
error("22222222")
end
else
error("undefied condition, actorstate $actorstate $(@__LINE__)")
@@ -749,17 +758,24 @@ function actor(a::agentReflex)
msgToUser = nothing
totalsteps = checkTotalStepInPlan(a)
a.step = 0
while true # Actor loop
a.step += 1
a.substep += 1
@show a.step
if a.step <= totalsteps
prompt = actor_mistral_openorca(a)
@show prompt
respond = sendReceivePrompt(a, prompt)
prompt_actor = actor_mistral_openorca(a)
println("")
@show prompt_actor
respond = sendReceivePrompt(a, prompt_actor)
respond = split(respond, "Obs")[1]
respond_actor_raw = respond
@show respond_actor_raw
ind = findfirst(":", respond)[end]
respond = respond[ind+1:end]
respond = "Thought: " * respond
# sometimes the LLM does not generate a number after the headers, but I want one
if occursin("Act:", respond)
@@ -767,18 +783,27 @@ function actor(a::agentReflex)
"Act:", "ActInput:", "Obs:", "...",
"Answer:", "Conclusion:", "Summary:"]
headers = detectCharacters(respond, headerToDetect)
respond = addStepNumber(respond, headers, a.step)
respond = replaceHeaders(respond, headers, a.attempt, a.step, a.substep)
end
respond = split(respond, "Obs")[1]
respond = split(respond, "<|im_end|>")[1]
@show respond
respond_actor = respond
println("")
@show respond_actor
headerToDetect = ["Question $(a.step):", "Plan $(a.step):", "Thought $(a.step):",
"Act $(a.step):", "ActInput $(a.step):", "Obs $(a.step):", "...",
"Answer $(a.step):", "Conclusion $(a.step):", "Summary $(a.step):"]
headerToDetect = ["Question $(a.step):",
"Plan $(a.step):",
"Thought $(a.step)-$(a.substep):",
"Act $(a.step)-$(a.substep):",
"ActInput $(a.step)-$(a.substep):",
"Obs $(a.step)-$(a.substep):",
"Check $(a.step)-$(a.substep):",
"Answer $(a.step):",
"Conclusion $(a.step):",
"Summary $(a.step):"]
headers = detectCharacters(respond, headerToDetect)
chunkedtext = chunktext(respond, headers)
@show chunkedtext
@@ -786,8 +811,8 @@ function actor(a::agentReflex)
# add to memory
a.memory[:shortterm] = addShortMem!(a.memory[:shortterm], chunkedtext)
toolname = toolNameBeingCalled(chunkedtext["Act $(a.step):"], a.tools)
toolinput = chunkedtext["ActInput $(a.step):"]
toolname = toolNameBeingCalled(chunkedtext["Act $(a.step)-$(a.substep):"], a.tools)
toolinput = chunkedtext["ActInput $(a.step)-$(a.substep):"]
@show toolname
@show toolinput
@@ -801,11 +826,13 @@ function actor(a::agentReflex)
f = a.tools[Symbol(toolname)][:func]
toolresult = f(a, toolinput)
@show toolresult
a.memory[:shortterm]["Obs $(a.step):"] = toolresult
a.memory[:shortterm]["Obs $(a.step)-$(a.substep):"] = toolresult
go = goNogo(a)
go, reason = goNogo(a)
a.memory[:shortterm]["Check $(a.step)-$(a.substep):"] = reason
if go == "No" # in case there is a cancel, go straight to evaluation
a.step = totalsteps
a.step -= 1
error(113)
end
end
else #TODO finish all steps
@@ -1158,8 +1185,10 @@ function goNogo(a)
respond = sendReceivePrompt(a, prompt)
decision = GeneralUtils.getStringBetweenCharacters(respond, "{", "}")
start = findfirst("}", respond) +1
reason = respond[start:end]
return decision
return decision, reason
end