This commit is contained in:
2023-12-17 05:11:52 +00:00
parent 7d0e45d067
commit 46933cb281

View File

@@ -92,102 +92,6 @@ function removeLatestMsg(a::T) where {T<:agent}
end
end
# function generatePrompt_mistral_openorca(a::T, usermsg::String, role::Symbol) where {T<:agent}
# prompt =
# """
# <|im_start|>system
# {systemMsg}
# <|im_end|>
# Here are the context for the question:
# {context}
# """
# prompt = replace(prompt, "{systemMsg}" => a.roles[role])
# toolnames = ""
# toollines = ""
# for (toolname, v) in a.tools
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
# toollines *= toolline
# toolnames *= "$toolname,"
# end
# prompt = replace(prompt, "{toolnames}" => toolnames)
# prompt = replace(prompt, "{tools}" => toollines)
# prompt = replace(prompt, "{context}" => a.context)
# prompt *= "<|im_start|>user\n" * usermsg * "\n<|im_end|>\n"
# prompt *= "<|im_start|>assistant\n"
# return prompt
# end
# function generatePrompt_mistral_openorca(a::T, usermsg::String,
# thinkingMode::Symbol=:nothinking) where {T<:agent}
# prompt =
# """
# <|im_start|>system
# {systemMsg}
# You have access to the following tools:
# {tools}
# {thinkingMode}
# <|im_end|>
# Here are the context for the question:
# {context}
# """
# prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
# prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
# toolnames = ""
# toollines = ""
# for (toolname, v) in a.tools
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
# toollines *= toolline
# toolnames *= "$toolname,"
# end
# prompt = replace(prompt, "{toolnames}" => toolnames)
# prompt = replace(prompt, "{tools}" => toollines)
# prompt = replace(prompt, "{context}" => a.context)
# prompt *= "<|im_start|>user\nQuestion: " * usermsg * "\n<|im_end|>\n"
# prompt *= "<|im_start|>assistant\n"
# return prompt
# end
# Build a mistral-openorca chat prompt for agent `a`.
#
# Arguments:
# - `a`: the agent; supplies the system-role text (`a.roles[a.role]`), the
#   thinking-mode text (`a.thinkingMode`), the tool registry (`a.tools`) and
#   the running context (`a.context`).
# - `usermsg`: the user's message, inserted after the "Stimulus: " marker.
# - `thinkingMode`: key into `a.thinkingMode` selecting the reasoning style;
#   defaults to `:nothinking`.
#
# Returns the fully assembled prompt `String`, ending with the
# `<|im_start|>assistant` header so the model continues from there.
function generatePrompt_mistral_openorca(a::T, usermsg::String,
    thinkingMode::Symbol=:nothinking) where {T<:agent}
    prompt =
        """
        <|im_start|>system
        {systemMsg}
        {tools}
        {thinkingMode}
        <|im_end|>
        Here are the context for the stimulus:
        {context}
        """
    prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
    prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
    # Render one "name: description input output" line per registered tool.
    toolnames = ""
    toollines = ""
    for (toolname, v) in a.tools
        toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
        toollines *= toolline
        toolnames *= "$toolname,"
    end
    # {toolnames} does not occur in this template, so this replace is a no-op;
    # kept for parity with the sibling prompt builders.
    prompt = replace(prompt, "{toolnames}" => toolnames)
    # Fix: the template contains a {tools} placeholder, but the substitution
    # was missing, so the literal text "{tools}" leaked into the prompt sent
    # to the model. Restore it (the commented-out predecessor had this line).
    prompt = replace(prompt, "{tools}" => toollines)
    prompt = replace(prompt, "{context}" => a.context)
    prompt *= "<|im_start|>user\nStimulus: " * usermsg * "\n<|im_end|>\n"
    prompt *= "<|im_start|>assistant\n"
    return prompt
end
function chat_mistral_openorca(a::agentReflex)
"""
general prompt format:
@@ -266,7 +170,7 @@ function planner_mistral_openorca(a::agentReflex)
conversation = messagesToString(a.messages)
toollines = ""
for (toolname, v) in a.tools
if toolname ["chatbox"]
if toolname [""]
toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
toollines *= toolline
end
@@ -313,93 +217,50 @@ function planner_mistral_openorca(a::agentReflex)
return objective, plan
end
# function planner_mistral_openorca(a::agentReflex)
# """
# general prompt format:
# "
# <|im_start|>system
# {role}
# {tools}
# {thinkingFormat}
# <|im_end|>
# {context}
# <|im_start|>user
# {usermsg}
# <|im_end|>
# <|im_start|>assistant
# "
# Note:
# {context} =
# "
# {earlierConversation}
# {env state}
# {shortterm memory}
# {longterm memory}
# "
# """
# prompt =
# """
# <|im_start|>system
# {role}
# {roleSpecificKnowledge}
# {tools}
# {thinkingFormat}
# {context}
# <|im_end|>
# <|im_start|>user
# {usermsg}
# <|im_end|>
# <|im_start|>assistant
# Plan:
# """
# prompt = replace(prompt, "{role}" => a.roles[a.role])
# prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner])
# roleSpecificKnowledge =
# """
# Info you need from the user to be able to help them selecting their best wine:
# - type of food
# - occasion
# - user's personal taste of wine
# - wine price range
# - ambient temperature at the serving location
# - wines we have in stock
# You job is to provide a personalized recommendation of up to two wines based on the user's info above, and you describe the benefits of each wine in detail.
# """
# prompt = replace(prompt, "{roleSpecificKnowledge}" => roleSpecificKnowledge)
# toolnames = ""
# toollines = ""
# for (toolname, v) in a.tools
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
# toollines *= toolline
# toolnames *= "$toolname,"
# end
# prompt = replace(prompt, "{toolnames}" => toolnames)
# prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
"""
    updatePlan()

Build the prompt that asks the model to revise the current plan for the
wine-recommendation agent, from the conversation so far (`a.messages`),
the tool registry (`a.tools`) and short-term working memory
(`a.memory[:shortterm]`).

NOTE(review): `a` is not a parameter — it is resolved from the enclosing
scope at call time; confirm this function is intended to be nested inside
(or closed over) an `agentReflex` context.

Returns the assembled prompt `String` ending with "New plan:" so the model
continues with the revised plan.
""" #WORKING
function updatePlan()
    conversation = messagesToString_nomark(a.messages)
    toollines = ""
    for (toolname, v) in a.tools
        # Fix: the membership operator had been lost ("if toolname [\"chatbox\"]"
        # is not valid Julia); restore the intended membership test, matching
        # the same filter used by the planner.
        if toolname in ["chatbox"]
            toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
            toollines *= toolline
        end
    end
    # Short-term memory rendered as plain text ("Your work" section below).
    work = dictToString(a.memory[:shortterm])
    # Fix: stray merge-residue lines (old "# prompt = replace(...)" comments)
    # had leaked INSIDE this string literal and were being sent to the model
    # verbatim; they are removed here. Prompt typos fixed: "You job" -> "Your
    # job", "up date" -> "update".
    prompt =
        """
        <|im_start|>system
        $(a.roles[a.role])
        The required info you need for wine recommendation:
        - type of food: ask the user
        - occasion: ask the user
        - user's personal taste of wine: ask the user
        - wine price range: ask the user
        - ambient temperature at the serving location: ask the user
        - wines we have in stock
        You provide a personalized recommendation of up to two wines based on the user's info above, and you describe the benefits of each wine in detail.
        You have access to the following tools:
        $toollines
        Your talk with the user:
        $conversation
        Your work:
        $work
        Your job is to use info from your conversation with the user and your work to update the plan.
        <|im_end|>
        New plan:
        """
    return prompt
end
function actor_mistral_openorca(a::agentReflex)
"""
@@ -429,22 +290,6 @@ function actor_mistral_openorca(a::agentReflex)
"
"""
prompt =
"""
<|im_start|>system
$(a.roles[a.role])
{tools}
$(a.thinkingFormat[:actor])
{context}
<|im_end|>
{shorttermMemory}
Thought $(a.step):
"""
s = dictToString(a.memory[:shortterm], skiplist=["user:", "Plan 1:"])
prompt = replace(prompt, "{shorttermMemory}" => s)
toolnames = ""
toollines = ""
for (toolname, v) in a.tools
@@ -452,8 +297,8 @@ function actor_mistral_openorca(a::agentReflex)
toollines *= toolline
toolnames *= "$toolname, "
end
prompt = replace(prompt, "{toolnames}" => toolnames)
prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
shorttermMemory = dictToString(a.memory[:shortterm], skiplist=["user:"])
conversation = messagesToString_nomark(a.messages, addressAIas="I")
@@ -461,93 +306,28 @@ function actor_mistral_openorca(a::agentReflex)
"""
Your talk with the user:
$conversation
{env state}
{longterm memory}
{plan}
"""
# context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "")
context = replace(context, "{longterm memory}" => "")
context = replace(context, "{plan}" => "My plan:\n$(a.memory[:shortterm]["Plan $(a.attempt):"])")
prompt = replace(prompt, "{context}" => context)
prompt =
"""
<|im_start|>system
$(a.roles[a.role])
You have access to the following tools:
$toollines
$(a.thinkingFormat[:actor])
$context
<|im_end|>
$shorttermMemory
Thought $(a.step):
"""
prompt = replace(prompt, "{toolnames}" => toolnames)
return prompt
end
# function actor_mistral_openorca(a::agentReflex)
# """
# general prompt format:
# "
# <|im_start|>system
# {role}
# {tools}
# {thinkingFormat}
# <|im_end|>
# {context}
# <|im_start|>user
# {usermsg}
# <|im_end|>
# <|im_start|>assistant
# "
# Note:
# {context} =
# "
# {earlierConversation}
# {env state}
# {shortterm memory}
# {longterm memory}
# "
# """
# mark = "$(a.step)"
# prompt =
# """
# <|im_start|>system
# {role}
# {tools}
# {thinkingFormat}
# {context}
# <|im_end|>
# {shorttermMemory}
# Thought $(a.step):
# """
# prompt = replace(prompt, "{role}" => a.roles[a.role])
# prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:actor])
# prompt = replace(prompt, "{step}" => a.step)
# s = dictToString(a.memory[:shortterm], skiplist=["user:", "Plan 1:"])
# prompt = replace(prompt, "{shorttermMemory}" => s)
# toolnames = ""
# toollines = ""
# for (toolname, v) in a.tools
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
# toollines *= toolline
# toolnames *= "$toolname, "
# end
# prompt = replace(prompt, "{toolnames}" => toolnames)
# prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
# context =
# """
# {env state}
# {longterm memory}
# {plan}
# """
# # context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
# context = replace(context, "{env state}" => "")
# context = replace(context, "{longterm memory}" => "")
# context = replace(context, "{plan}" => "My plan:\n$(a.memory[:shortterm]["Plan $(a.attempt):"])")
# prompt = replace(prompt, "{context}" => context)
# return prompt
# end
"""
@@ -603,7 +383,7 @@ function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)
end
# if the LLM used the chatbox, use the message returned from the chatbox as the conversation response
if workstate == "chatbox"
if workstate == "chatbox" || workstate == "formulatedUserResponse"
#TODO paraphrase msg so that it is human friendlier word.
else
response = chat_mistral_openorca(a)
@@ -653,7 +433,7 @@ function work(a::agentReflex)
@show a.attempt
# enter actor loop
actorstate, msgToUser = actor(a) #WORKING
actorstate, msgToUser = actor(a)
if actorstate == "chatbox"
response = msgToUser
@@ -680,6 +460,10 @@ function work(a::agentReflex)
score = grading(a, guideline, response)
@show score
if score > 5 # good enough answer
println("")
formulatedresponse_final = response
@show formulatedresponse_final
workstate = "formulatedUserResponse"
a.memory[:shortterm] = OrderedDict{String, Any}()
a.memory[:log] = OrderedDict{String, Any}()
break
@@ -722,239 +506,6 @@ function work(a::agentReflex)
end
# function work(a::agentReflex, usermsg::String)
# response = nothing
# a.memory[:shortterm] = OrderedDict{String, Any}()
# a.memory[:log] = OrderedDict{String, Any}()
# a.memory[:shortterm]["user:"] = usermsg
# a.memory[:log]["user:"] = usermsg
# a.newplan = true
# while true # Work loop
# # plan
# if a.attempt <= a.attemptlimit
# toolname = nothing
# toolinput = nothing
# if a.newplan == true
# a.attempt += 1
# a.step = 0
# prompt_plan = planner_mistral_openorca(a)
# println("")
# @show prompt_plan
# response = sendReceivePrompt(a, prompt_plan, max_tokens=1024)
# # sometimes LLM add not-need word I don't want
# plan = splittext(response, ["Step 1", "<|im_end|>", "Response", "Execution",
# "Result", "Recommendation", "My response"])
# # plan = replace(plan, "Plan:"=>"")
# println("")
# @show plan
# a.newplan = false
# a.memory[:shortterm]["Plan $(a.attempt):"] = plan
# a.memory[:log]["Plan $(a.attempt):"] = plan
# end
# println("")
# @show a.attempt
# # enter actor loop
# actorstate, msgToUser = actor(a)
# if actorstate == "chatbox"
# response = msgToUser
# break
# elseif actorstate == "all steps done" || actorstate == "formulateUserresponse"
# println("all steps done")
# response = formulateUserresponse(a)
# println("")
# formulatedresponse = response
# @show formulatedresponse
# a.memory[:shortterm]["response $(a.attempt):"] = response
# a.memory[:log]["response $(a.attempt):"] = response
# # evaluate. if score < 8/10 try again.
# guideline = writeEvaluationGuideline(a, a.memory[:shortterm]["user:"])
# println("")
# @show guideline
# score = grading(a, guideline, response)
# @show score
# if score >= 6 # good enough answer
# break
# else # self evaluate and reflect then try again
# analysis = analyze(a)
# println("")
# @show analysis
# lessonwithcontext = selfReflext(a, analysis)
# println("")
# @show lessonwithcontext
# newdict = OrderedDict()
# a.memory[:shortterm] = keepOnlyKeys(a.memory[:shortterm], ["user:"])
# headerToDetect = ["Lesson:", "Context:", ]
# headers = detectCharacters(lessonwithcontext, headerToDetect)
# chunkedtext = chunktext(lessonwithcontext, headers)
# a.memory[:longterm][chunkedtext["Context:"]] = chunkedtext["Lesson:"]
# a.newplan = true
# println("")
# println("RETRY $(a.attempt +1)")
# println("")
# end
# else
# error("undefied condition, actorstate $actorstate $(@__LINE__)")
# break
# end
# else
# error("attempt limit reach")
# break
# end
# end
# # good enough answer
# # communicates with user
# _ = addNewMessage(a, "assistant", response)
# return response
# end
# function work(a::agentReflex, usermsg::String)
# response = nothing
# if a.thinkingmode == :new_thinking
# _ = addNewMessage(a, "user", usermsg)
# a.memory[:shortterm] = OrderedDict{String, Any}()
# a.memory[:log] = OrderedDict{String, Any}()
# a.memory[:shortterm]["user:"] = usermsg
# a.memory[:log]["user:"] = usermsg
# a.newplan = true
# elseif a.thinkingmode == :continue_thinking
# println("continue_thinking!!")
# _ = addNewMessage(a, "user", usermsg)
# a.memory[:shortterm]["Obs $(a.step):"] = usermsg
# a.memory[:log]["Obs $(a.step):"] = usermsg
# else
# error("undefined condition thinkingmode = $thinkingmode $(@__LINE__)")
# end
# while true # Work loop
# # plan
# if a.attempt <= a.attemptlimit
# toolname = nothing
# toolinput = nothing
# if a.newplan == true
# a.attempt += 1
# a.step = 0
# prompt_plan = planner_mistral_openorca(a)
# println("")
# @show prompt_plan
# response = sendReceivePrompt(a, prompt_plan, max_tokens=1024)
# # sometimes LLM add not-need word I don't want
# plan = splittext(response, ["Step 1", "<|im_end|>", "Response", "Execution",
# "Result", "Recommendation", "My response"])
# # plan = replace(plan, "Plan:"=>"")
# println("")
# @show plan
# a.newplan = false
# a.memory[:shortterm]["Plan $(a.attempt):"] = plan
# a.memory[:log]["Plan $(a.attempt):"] = plan
# end
# println("")
# @show a.attempt
# # enter actor loop
# actorstate, msgToUser = actor(a)
# if actorstate == "chatbox"
# response = msgToUser
# break
# elseif actorstate == "all steps done" || actorstate == "formulateUserresponse"
# println("all steps done")
# response = formulateUserresponse(a)
# println("")
# formulatedresponse = response
# @show formulatedresponse
# a.memory[:shortterm]["response $(a.attempt):"] = response
# a.memory[:log]["response $(a.attempt):"] = response
# # evaluate. if score < 8/10 try again.
# guideline = writeEvaluationGuideline(a, a.memory[:shortterm]["user:"])
# println("")
# @show guideline
# score = grading(a, guideline, response)
# @show score
# if score >= 6 # good enough answer
# break
# else # self evaluate and reflect then try again
# analysis = analyze(a)
# println("")
# @show analysis
# lessonwithcontext = selfReflext(a, analysis)
# println("")
# @show lessonwithcontext
# newdict = OrderedDict()
# a.memory[:shortterm] = keepOnlyKeys(a.memory[:shortterm], ["user:"])
# headerToDetect = ["Lesson:", "Context:", ]
# headers = detectCharacters(lessonwithcontext, headerToDetect)
# chunkedtext = chunktext(lessonwithcontext, headers)
# a.memory[:longterm][chunkedtext["Context:"]] = chunkedtext["Lesson:"]
# a.newplan = true
# println("")
# println("RETRY $(a.attempt +1)")
# println("")
# end
# else
# error("undefied condition, actorstate $actorstate $(@__LINE__)")
# break
# end
# else
# error("attempt limit reach")
# break
# end
# end
# # good enough answer
# # communicates with user
# _ = addNewMessage(a, "assistant", response)
# return response
# end
# function evaluate()
# end
"""
Actor function.
@@ -1004,7 +555,7 @@ function actor(a::agentReflex)
@show a.step
if a.step < totalsteps -1 # the last step of the plan is responding, let work() do this part
#WORKING check whether LLM already complete the current step
# check whether LLM already complete the current step
iscomplete = checkStepCompletion(a)
if iscomplete == false
@@ -1048,18 +599,18 @@ function actor(a::agentReflex)
a.memory[:log] = addShortMem!(a.memory[:log], chunkedtext)
toolname = toolNameBeingCalled(chunkedtext["Act $(a.step):"], a.tools)
toolinput = chunkedtext["Actinput $(a.step):"]
@show toolname
@show toolinput
if toolname == "chatbox" # chat with user
msgToUser = toolinput
msgToUser = chunkedtext["Actinput $(a.step):"]
actorState = toolname
break
elseif toolname == "skipstep"
elseif toolname == "N/A"
a.step += 1
else # function call
f = a.tools[toolname][:func]
toolinput = chunkedtext["Actinput $(a.step):"]
toolresult = f(a, toolinput)
@show toolresult
a.memory[:shortterm]["Obs $(a.step):"] = toolresult