module interface
export agentReact, agentReflex,
addNewMessage, clearMessage, removeLatestMsg, conversation, writeEvaluationGuideline,
grading, analyze, selfReflext, actor_mistral_openorca2, formulateUserRespond
using JSON3, DataStructures, Dates, UUIDs, HTTP
using CommUtils, GeneralUtils
using ..type, ..utils
# ---------------------------------------------------------------------------- #
# pythoncall setting #
# ---------------------------------------------------------------------------- #
# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252
# by setting the following variables, PythonCall will use the system or conda python and
# packages installed by the system or conda
# if these settings are not set (commented out), PythonCall will use its own python and packages
# installed by CondaPkg (from env_preparation.jl)
# ENV["JULIA_CONDAPKG_BACKEND"] = "Null"
# systemPython = split(read(`which python`, String), "\n")[1]
# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with $> which python ex. raw"/root/conda/bin/python"
# using PythonCall
# const py_agents = PythonCall.pynew()
# const py_llms = PythonCall.pynew()
# function __init__()
# # PythonCall.pycopy!(py_cv2, pyimport("cv2"))
# # equivalent to from urllib.request import urlopen in python
# PythonCall.pycopy!(py_agents, pyimport("langchain.agents"))
# PythonCall.pycopy!(py_llms, pyimport("langchain.llms"))
# end
#------------------------------------------------------------------------------------------------100
""" Add new message to agent.
Args:
Return:
```jldoctest
julia> addNewMessage(agent1, "user", "Where should I go to buy snacks")
````
"""
function addNewMessage(a::T1, role::String, content::T2) where {T1<:agent, T2<:AbstractString}
if role ∉ a.availableRole # guard against typos
error("role is not in agent.availableRole $(@__LINE__)")
end
# check whether user messages exceed limit
userMsg = 0
for i in a.messages
if i[:role] == "user"
userMsg += 1
end
end
messageleft = 0
if userMsg > a.maxUserMsg # delete all conversation
clearMessage(a)
messageleft = a.maxUserMsg
else
userMsg += 1
d = Dict(:role=> role, :content=> content, :timestamp=> Dates.now())
push!(a.messages, d)
messageleft = a.maxUserMsg - userMsg
end
return messageleft
end
function clearMessage(a::T) where {T<:agent}
while length(a.messages) > 1 # the system instruction will NOT be deleted
pop!(a.messages)
end
end
function removeLatestMsg(a::T) where {T<:agent}
if length(a.messages) > 1
pop!(a.messages)
end
end
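# A minimal usage sketch for the message helpers above (assumes an agent `a`
# constructed elsewhere with "user" in a.availableRole and a.maxUserMsg = 10):
#   left = addNewMessage(a, "user", "Where should I go to buy snacks?") # -> 9
#   removeLatestMsg(a) # drops the message just added, keeping the system instruction
#   clearMessage(a)    # drops everything except the system instruction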
# function generatePrompt_mistral_openorca(a::T, usermsg::String, role::Symbol) where {T<:agent}
# prompt =
# """
# <|im_start|>system
# {systemMsg}
# <|im_end|>
# Here are the context for the question:
# {context}
# """
# prompt = replace(prompt, "{systemMsg}" => a.roles[role])
# toolnames = ""
# toollines = ""
# for (toolname, v) in a.tools
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
# toollines *= toolline
# toolnames *= "$toolname,"
# end
# prompt = replace(prompt, "{toolnames}" => toolnames)
# prompt = replace(prompt, "{tools}" => toollines)
# prompt = replace(prompt, "{context}" => a.context)
# prompt *= "<|im_start|>user\n" * usermsg * "\n<|im_end|>\n"
# prompt *= "<|im_start|>assistant\n"
# return prompt
# end
# function generatePrompt_mistral_openorca(a::T, usermsg::String,
# thinkingMode::Symbol=:nothinking) where {T<:agent}
# prompt =
# """
# <|im_start|>system
# {systemMsg}
# You have access to the following tools:
# {tools}
# {thinkingMode}
# <|im_end|>
# Here are the context for the question:
# {context}
# """
# prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
# prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
# toolnames = ""
# toollines = ""
# for (toolname, v) in a.tools
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
# toollines *= toolline
# toolnames *= "$toolname,"
# end
# prompt = replace(prompt, "{toolnames}" => toolnames)
# prompt = replace(prompt, "{tools}" => toollines)
# prompt = replace(prompt, "{context}" => a.context)
# prompt *= "<|im_start|>user\nQuestion: " * usermsg * "\n<|im_end|>\n"
# prompt *= "<|im_start|>assistant\n"
# return prompt
# end
function generatePrompt_mistral_openorca(a::T, usermsg::String,
thinkingMode::Symbol=:nothinking) where {T<:agent}
prompt =
"""
<|im_start|>system
{systemMsg}
{tools}
{thinkingMode}
<|im_end|>
Here is the context for the stimulus:
{context}
"""
prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
toolnames = ""
toollines = ""
for (toolname, v) in a.tools
toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
toollines *= toolline
toolnames *= "$toolname,"
end
prompt = replace(prompt, "{toolnames}" => toolnames)
prompt = replace(prompt, "{context}" => a.context)
prompt *= "<|im_start|>user\nStimulus: " * usermsg * "\n<|im_end|>\n"
prompt *= "<|im_start|>assistant\n"
return prompt
end
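# Hypothetical rendered prompt for illustration; the role text and the wikisearch
# tool entry are assumptions, not actual contents of a.roles or a.tools:
#   <|im_start|>system
#   You are a helpful assistant.
#   wikisearch: Useful for when you need to search an encyclopedia. <input> <output>
#   <thinking-mode instructions from a.thinkingMode>
#   <|im_end|>
#   Here is the context for the stimulus:
#   <a.context>
#   <|im_start|>user
#   Stimulus: Where should I go to buy snacks?
#   <|im_end|>
#   <|im_start|>assistant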
function chat_mistral_openorca(a::agentReflex, usermsg::String)
"""
general prompt format:
"
<|im_start|>system
{role}
{tools}
{thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"
Note:
{context} =
"
{earlierConversation}
{env state}
{shortterm memory}
{longterm memory}
"
"""
prompt =
"""
<|im_start|>system
{role}
{thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => "")
context =
"""
{earlierConversation}
{env state}
{longterm memory}
"""
context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "")
context = replace(context, "{longterm memory}" => "")
prompt = replace(prompt, "{context}" => context)
prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg")
return prompt
end
function planner_mistral_openorca(a::agentReflex)
"""
general prompt format:
"
<|im_start|>system
{role}
{tools}
{thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"
Note:
{context} =
"
{earlierConversation}
{env state}
{shortterm memory}
{longterm memory}
"
"""
prompt =
"""
<|im_start|>system
{role}
{roleSpecificKnowledge}
{tools}
{thinkingFormat}
{context}
<|im_end|>
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
Plan:
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner])
roleSpecificKnowledge =
"""
Info you need from the user to be able to help them select the best wine:
- type of food
- occasion
- user's personal taste of wine
- wine price range
- ambient temperature at the serving location
- wines we have in stock
Your job is to provide a personalized recommendation of up to two wines based on the user's info above, describing the benefits of each wine in detail.
"""
prompt = replace(prompt, "{roleSpecificKnowledge}" => roleSpecificKnowledge)
toolnames = ""
toollines = ""
for (toolname, v) in a.tools
toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
toollines *= toolline
toolnames *= "$toolname,"
end
prompt = replace(prompt, "{toolnames}" => toolnames)
prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
context =
"""
{earlierConversation}
{env state}
{longterm memory}
"""
context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "")
context = replace(context, "{longterm memory}" => "")
prompt = replace(prompt, "{context}" => context)
# the user's stimulus was stored in short-term memory by work()
prompt = replace(prompt, "{usermsg}" => "Stimulus: $(a.memory[:shortterm]["user:"])")
return prompt
end
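# The planner respond is later trimmed with splittext in work() (see below);
# splittext (from ..utils) is assumed here to keep the text preceding the first
# matched marker, e.g. a hypothetical raw respond
#   "Plan:\n1. Ask about the occasion.\n2. ...\nStep 1: Execution ..."
# would keep only the numbered plan, dropping everything from "Step 1" onward.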
function actor_mistral_openorca(a::agentReflex)
"""
general prompt format:
"
<|im_start|>system
{role}
{tools}
{thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"
Note:
{context} =
"
{earlierConversation}
{env state}
{shortterm memory}
{longterm memory}
"
"""
prompt =
"""
<|im_start|>system
{role}
{tools}
{thinkingFormat}
{context}
<|im_end|>
{shorttermMemory}
Thought $(a.step):
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:actor])
prompt = replace(prompt, "{step}" => a.step)
s = shortMemoryToString(a.memory[:shortterm], ["user:", "Plan 1:"])
prompt = replace(prompt, "{shorttermMemory}" => s)
toolnames = ""
toollines = ""
for (toolname, v) in a.tools
toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
toollines *= toolline
toolnames *= "$toolname, "
end
prompt = replace(prompt, "{toolnames}" => toolnames)
prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
context =
"""
{env state}
{longterm memory}
{plan}
"""
# context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "")
context = replace(context, "{longterm memory}" => "")
context = replace(context, "{plan}" => "My plan:\n$(a.memory[:shortterm]["Plan $(a.attempt):"])")
prompt = replace(prompt, "{context}" => context)
return prompt
end
"""
Chat with llm.
```jldoctest
julia> using JSON3, UUIDs, Dates, FileIO, CommUtils, ChatAgent
julia> mqttClientSpec = (
clientName= "someclient", # name of this client
clientID= "$(uuid4())",
broker= "mqtt.yiem.ai",
pubtopic= (imgAI="img/api/v0.0.1/gpu/request",
txtAI="txt/api/v0.1.0/gpu/request"),
subtopic= (imgAI="agent/api/v0.1.0/img/respond",
txtAI="agent/api/v0.1.0/txt/respond"),
keepalive= 30,
)
julia> msgMeta = Dict(
:msgPurpose=> "updateStatus",
:from=> "agent",
:to=> "llmAI",
:requestrespond=> "request",
:sendto=> "", # destination topic
:replyTo=> "agent/api/v0.1.0/txt/respond", # requester ask responder to send reply to this topic
:repondToMsgId=> "", # responder is responding to this msg id
:taskstatus=> "", # "complete", "fail", "waiting" or other status
:timestamp=> Dates.now(),
:msgId=> "$(uuid4())",
)
julia> newAgent = ChatAgent.agentReact(
"Jene",
mqttClientSpec,
role=:assistant_react,
msgMeta=msgMeta
)
julia> respond = ChatAgent.conversation(newAgent, "Hi! how are you?")
```
"""
function conversation(a::T, usermsg::String) where {T<:agent}
respond = nothing
if a.thought != "nothing" # continue thought
_ = addNewMessage(a, "user", usermsg)
a.thought *= "Obs $(a.thinkinground): $usermsg\n"
prompt = a.thought
respond = work(a, prompt)
else # new thought
thinkingmode = chooseThinkingMode(a, usermsg)
@show thinkingmode
if thinkingmode == :no_thinking
a.context = conversationSummary(a) #TODO only summarize long conversations; summarizing short ones leaves out details
_ = addNewMessage(a, "user", usermsg)
prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
@show prompt
respond = sendReceivePrompt(a, prompt)
respond = split(respond, "<|im_end|>")[1]
respond = replace(respond, "\n" => "")
_ = addNewMessage(a, "assistant", respond)
@show respond
elseif thinkingmode == :thinking
a.context = conversationSummary(a)
_ = addNewMessage(a, "user", usermsg)
prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
respond = work(a, prompt)
else
error("undefined condition thinkingmode = $thinkingmode $(@__LINE__)")
end
end
return respond
end
"""
Continuously run llm functions except when llm is getting Answer: or chatbox.
There are many work() depend on thinking mode.
"""
function work(a::T, prompt::String, maxround::Int=3) where {T<:agent}
respond = nothing
while true
a.thinkinground += 1
@show a.thinkinground
toolname = nothing
toolinput = nothing
if a.thinkinground > a.thinkingroundlimit
a.thought *= "Thought $(a.thinkinground): I think I know the answer."
prompt = a.thought
end
@show prompt
respond = sendReceivePrompt(a, prompt)
headerToDetect = nothing
if a.thinkinground == 1
try
respond = split(respond, "Obs:")[1]
headerToDetect = ["Question:", "Plan:", "Thought:", "Act:", "ActInput:", "Obs:", "...", "Answer:",
"Conclusion:", "Summary:"]
catch
end
else
try
respond = split(respond, "Obs $(a.thinkinground):")[1]
headerToDetect = ["Question $(a.thinkinground):", "Plan $(a.thinkinground):",
"Thought $(a.thinkinground):", "Act $(a.thinkinground):",
"ActInput $(a.thinkinground):", "Obs $(a.thinkinground):",
"...", "Answer:",
"Conclusion:", "Summary:"]
catch
end
end
@show respond
headers = detectCharacters(respond, headerToDetect)
chunkedtext = chunktext(respond, headers)
Answer = findDetectedCharacter(headers, "Answer:")
AnswerInd = length(Answer) != 0 ? Answer[1] : nothing
ActHeader = a.thinkinground == 1 ? "Act:" : "Act $(a.thinkinground):" # round-1 headers carry no number
Act = findDetectedCharacter(headers, ActHeader)
if length(Answer) == 1 && length(Act) == 0
a.thought = "nothing" # assignment finished, no more thought
a.context = "nothing"
a.thinkinground = 0
respond = chunkedtext[AnswerInd][:body]
respond = replace(respond, "<|im_end|>"=>"")
_ = addNewMessage(a, "assistant", respond)
break
else
# check for tool being called
if length(findDetectedCharacter(headers, ActHeader)) != 0 # check whether there is Act: in a respond
ActInd = findDetectedCharacter(headers, ActHeader)[1]
toolname = toolNameBeingCalled(chunkedtext[ActInd][:body], a.tools)
end
ActInputHeader = a.thinkinground == 1 ? "ActInput:" : "ActInput $(a.thinkinground):"
if length(findDetectedCharacter(headers, ActInputHeader)) != 0 # check whether there is ActInput: in a respond
ActInputInd = findDetectedCharacter(headers, ActInputHeader)[1]
toolinput = chunkedtext[ActInputInd][:body]
end
@show toolname
@show toolinput
if toolname === nothing || toolinput === nothing
println("toolname $toolname toolinput $toolinput retry thinking")
a.thinkinground -= 1
continue
end
# clean up; toolinput is guaranteed non-nothing here
if occursin(" \"", toolinput)
toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " \"", "\"\n")
else
toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " ", "\n")
end
if a.thought == "nothing"
thought = ""
for i in chunkedtext
header = i[:header]
header = replace(header, ":"=>" $(a.thinkinground):") # add the round number so the llm is not confused
body = i[:body]
thought *= "$header $body"
end
a.thought = prompt * thought
else
a.thought *= respond
end
if toolname == "chatbox" # chat with user
a.thought *= toolinput
respond = toolinput
_ = addNewMessage(a, "assistant", respond)
break
else # function call
f = a.tools[Symbol(toolname)][:func]
_result = f(toolinput)
if _result != "No info available." #TODO for use with wikisearch(). Not good for other tools
_result = makeSummary(a, _result)
end
result = "Obs $(a.thinkinground): $_result\n"
a.thought *= result
prompt = a.thought
end
end
end
@show respond
return respond
end
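# Sketch of the Thought/Act/Obs parsing used in work() above; detectCharacters
# and chunktext come from ..utils and their return shapes are assumptions here:
#   respond = "Thought 2: search for it.\nAct 2: wikisearch\nActInput 2: amd gpu\n"
#   headers = detectCharacters(respond, ["Thought 2:", "Act 2:", "ActInput 2:"])
#   chunked = chunktext(respond, headers) # chunks carrying :header and :body
#   toolNameBeingCalled(chunked[2][:body], a.tools) # -> "wikisearch"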
function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)
a.attemptlimit = attemptlimit
respond = nothing
# determine thinking mode
a.thinkingmode = chooseThinkingMode(a, usermsg)
@show a.thinkingmode
if a.thinkingmode == :no_thinking
a.earlierConversation = conversationSummary(a) #TODO only summarize long conversations; summarizing short ones leaves out details
_ = addNewMessage(a, "user", usermsg)
prompt = chat_mistral_openorca(a, usermsg)
println("")
@show prompt
respond = sendReceivePrompt(a, prompt)
respond = split(respond, "<|im_end|>")[1]
respond = replace(respond, "\n" => "")
_ = addNewMessage(a, "assistant", respond)
println("")
@show respond
else
respond = work(a, usermsg)
end
return respond
end
function work(a::agentReflex, usermsg::String)
respond = nothing
if a.thinkingmode == :new_thinking
a.earlierConversation = conversationSummary(a)
_ = addNewMessage(a, "user", usermsg)
a.memory[:shortterm]["user:"] = usermsg
a.memory[:log]["user:"] = usermsg
a.newplan = true
elseif a.thinkingmode == :continue_thinking #TODO
println("continue_thinking!!")
_ = addNewMessage(a, "user", usermsg)
a.memory[:shortterm]["Obs $(a.step):"] = usermsg
a.memory[:log]["Obs $(a.step):"] = usermsg
else
error("undefined condition thinkingmode = $thinkingmode $(@__LINE__)")
end
while true # Work loop
# plan
@show a.attempt
@show usermsg
if a.attempt <= a.attemptlimit
toolname = nothing
toolinput = nothing
if a.newplan == true
prompt_plan = planner_mistral_openorca(a)
println("")
@show prompt_plan
respond = sendReceivePrompt(a, prompt_plan, max_tokens=1024)
println("")
plan_raw = respond
@show plan_raw
# sometimes the LLM adds extra words we don't want
plan = splittext(respond, ["Step 1", "<|im_end|>", "Response", "Execution",
"Result", "Recommendation", "My response"])
# plan = replace(plan, "Plan:"=>"")
println("")
@show plan
a.attempt += 1
a.step = 0
a.newplan = false
a.memory[:shortterm]["Plan $(a.attempt):"] = plan
a.memory[:log]["Plan $(a.attempt):"] = plan
end
# enter actor loop
actorstate, msgToUser = actor(a)
if actorstate == "chatbox"
respond = msgToUser
break
elseif actorstate == "all steps done" || actorstate == "formulateUserRespond"
println("all steps done")
respond = formulateUserRespond(a)
println("")
formulatedRespond = respond
@show formulatedRespond
a.memory[:shortterm]["Respond $(a.attempt):"] = respond
a.memory[:log]["Respond $mark:"] = respond
# evaluate. if score < 8/10 try again.
guideline = writeEvaluationGuideline(a, a.memory[:shortterm]["user:"])
@show guideline
score = grading(a, guideline, respond)
@show score
if score >= 8 # good enough answer
@show a.memory[:shortterm]
a.memory[:shortterm] = OrderedDict{String, Any}()
a.memory[:log] = OrderedDict{String, Any}()
break
else # self evaluate and reflect then try again
analysis = analyze(a)
@show analysis
lessonwithcontext = selfReflext(a, analysis)
@show lessonwithcontext
a.memory[:shortterm] = OrderedDict{String, Any}()
#TODO add lesson and context into longterm memory
headerToDetect = ["Lesson:", "Context:", ]
headers = detectCharacters(lessonwithcontext, headerToDetect)
chunkedtext = chunktext(lessonwithcontext, headers)
@show chunkedtext
a.memory[:longterm][chunkedtext["Context:"]] = chunkedtext["Lesson:"]
a.newplan = true
end
else
error("undefied condition, actorstate $actorstate $(@__LINE__)")
break
end
else #TODO attempt limit reached, force the AI to answer
error("attempt limit reached")
break
end
end
# good enough answer
# communicates with user
_ = addNewMessage(a, "assistant", respond)
return respond
end
# function evaluate()
# end
"""
Actor function.
Args:
a, one of ChatAgent's agent.
plan, a step by step plan to respond
Return:
case 1) if actor complete the plan successfully.
actorState = "all steps done" inidicates that all step in plan were done.
msgToUser = nothing.
case 2) if actor needs to talk to user for more context
actorState = "chatbox"
msgToUser = "message from assistant to user"
"""
function actor(a::agentReflex)
actorState = nothing
msgToUser = nothing
totalsteps = checkTotalStepInPlan(a)
while true # Actor loop
if a.step == 0
a.step = 1
else
decision, reason = goNogo(a)
println("")
@show decision
@show reason
# a.memory[:shortterm]["Check $(a.step):"] = reason
if decision == "Yes" # in case there is a cancel, go straight to evaluation
a.step += 1
elseif decision == "No"
# repeat the latest step
a.memory[:shortterm] = removeHeaders(a.memory[:shortterm], a.step, ["Plan"])
println("repeating step $(a.step)")
elseif decision == "formulateUserRespond"
actorState = "formulateUserRespond"
msgToUser = nothing
break
else
error("undefined condition decision = $decision $(@__LINE__)")
end
end
@show a.step
if a.step < totalsteps
prompt_actor = actor_mistral_openorca(a)
println("")
@show prompt_actor
respond = sendReceivePrompt(a, prompt_actor)
respond = splittext(respond, ["Obs", "<|im_end|>"])
respond_actor_raw = respond
println("")
@show respond_actor_raw
if !occursin("Thought", respond)
respond = "Thought: " * respond
end
headerToDetect = ["Question:", "Plan:", "Thought:",
"Act:", "ActInput:", "Obs:", "...",
"Answer:", "Conclusion:", "Summary:"]
# replace headers with headers with correct attempt and step number
respond = replaceHeaders(respond, headerToDetect, a.step)
respond_actor = respond
println("")
@show respond_actor
headerToDetect = ["Plan $(a.attempt):",
"Thought $(a.step):",
"Act $(a.step):",
"ActInput $(a.step):",
"Obs $(a.step):",
"Check $(a.step):",]
headers = detectCharacters(respond, headerToDetect)
chunkedtext = chunktext(respond, headers)
@show chunkedtext
# add to memory
a.memory[:shortterm] = addShortMem!(a.memory[:shortterm], chunkedtext)
toolname = toolNameBeingCalled(chunkedtext["Act $(a.step):"], a.tools)
toolinput = chunkedtext["ActInput $(a.step):"]
@show toolname
@show toolinput
if toolname == "chatbox" # chat with user
msgToUser = toolinput
actorState = toolname
break
elseif toolname == "formulateUserRespond"
msgToUser = toolinput
actorState = toolname
break
else # function call
f = a.tools[Symbol(toolname)][:func]
toolresult = f(a, toolinput)
@show toolresult
a.memory[:shortterm]["Obs $(a.step):"] = toolresult
end
else #TODO finish all steps
actorState = "all steps done"
msgToUser = nothing
break
end
end
return actorState, msgToUser
end
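# replaceHeaders (from ..utils) is assumed to renumber the bare headers the LLM
# emits so the string-keyed lookups above ("Act $(a.step):" etc.) succeed, e.g.:
#   replaceHeaders("Thought: look it up.\nAct: wikisearch\n", ["Thought:", "Act:"], 2)
#   # -> "Thought 2: look it up.\nAct 2: wikisearch\n"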
""" Write evaluation guideline.
Args:
a, one of ChatAgent's agent.
usermsg, a stimulus, e.g. a question or task.
Return:
An evaluation guideline used to gauge the AI's work.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> usermsg = "What's AMD latest product?"
"
julia> evaluationGuideLine = writeEvaluationGuideline(agent, usermsg)
```
"""
function writeEvaluationGuideline(a::agentReflex, usermsg::T) where {T<:AbstractString}
prompt =
"""
<|im_start|>system
You have access to the following tools:
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
wikisearch: Useful for when you need to search an encyclopedia. Input is keywords, not a question.
Your work:
$usermsg
Your jobs are:
1. Write an evaluation guideline for your work in order to be able to evaluate your response.
2. Give an example of what the response should be.
<|im_end|>
"""
respond = sendReceivePrompt(a, prompt)
return respond
end
""" Determine a score out of 10 according to evaluation guideline.
Args:
a, one of ChatAgent's agent.
guidelines, an evaluation guideline.
shorttermMemory, a short term memory that logs what happened.
Return:
A score out of 10 based on guideline.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory = OrderedDict{String, Any}(
"user" => "What's the latest AMD GPU?",
"Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
"Act 1:" => " wikisearch\n",
"ActInput 1:" => " amd gpu latest\n",
"Obs 1:" => "No info available for your search query.",
"Act 2:" => " wikisearch\n",
"ActInput 2:" => " amd graphics card latest\n",
"Obs 2:" => "No info available for your search query.")
julia> guideline = "\nEvaluation Guideline:\n1. Check if the user's question has been understood correctly.\n2. Evaluate the steps taken to provide the information requested by the user.\n3. Assess whether the correct tools were used for the task.\n4. Determine if the user's request was successfully fulfilled.\n5. Identify any potential improvements or alternative approaches that could be used in the future.\n\nThe respond should include:\n1. A clear understanding of the user's question.\n2. The steps taken to provide the information requested by the user.\n3. An evaluation of whether the correct tools were used for the task.\n4. A confirmation or explanation if the user's request was successfully fulfilled.\n5. Any potential improvements or alternative approaches that could be used in the future."
julia> score = grading(agent, guideline, shorttermMemory)
2
```
"""
function grading(a, guideline::T1, text::T2) where {T1<:AbstractString, T2<:AbstractString}
prompt =
"""
<|im_start|>system
You have access to the following tools:
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
wikisearch: Useful for when you need to search an encyclopedia. Input is keywords, not a question.
$guideline
Your response: $text
Your jobs are:
1. Evaluate your response using the evaluation guideline and the example response.
2. Give yourself a score out of 10 for your response.
Use the following format to answer:
{Evaluation} Score {}/10.
<|im_end|>
"""
println("")
prompt_grading = prompt
@show prompt_grading
respond = sendReceivePrompt(a, prompt)
println("")
respond_grading = respond
@show respond_grading
_score = split(respond[end-5:end], "/")[1]
_score = split(_score, " ")[end]
score = parse(Int, _score)
return score
end
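# The tail-slicing parse above assumes the respond ends with "Score {n}/10.".
# A more defensive sketch under the same format assumption:
#   m = match(r"Score\s*(\d+)\s*/\s*10", respond)
#   score = m === nothing ? 0 : parse(Int, m.captures[1])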
""" Analize work.
Args:
a, one of ChatAgent's agent.
Return:
A report of the analyzed work.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory = OrderedDict{String, Any}(
"user:" => "What's the latest AMD GPU?",
"Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
"Act 1:" => " wikisearch\n",
"ActInput 1:" => " amd gpu latest\n",
"Obs 1:" => "No info available for your search query.",
"Act 2:" => " wikisearch\n",
"ActInput 2:" => " amd graphics card latest\n",
"Obs 2:" => "No info available for your search query.")
julia> agent.memory[:shortterm] = shorttermMemory
julia> report = analyze(agent)
```
""" #WORKING analyze sometime result in empty string ""
function analyze(a)
shorttermMemory = shortMemoryToString(a.memory[:shortterm], ["user:"])
prompt =
"""
<|im_start|>system
You have access to the following tools:
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
wikisearch: Useful for when you need to search an encyclopedia. Input is keywords, not a question.
Your work:
$shorttermMemory
Do each of the following steps in detail to analyze your work.
1. What happened?
2. List all relationships, each with cause and effect.
3. Look at each relationship, figure out why it behaved that way.
4. What could you do to improve the respond?
<|im_end|>
"""
respond = sendReceivePrompt(a, prompt, max_tokens=2048)
println("")
analyze_prompt = prompt
@show analyze_prompt
println("")
analyze_respond = respond
@show analyze_respond
return respond
end
""" Write a lesson drawn from evaluation.
Args:
a, one of ChatAgent's agent.
analysis, a report resulting from analyzing shorttermMemory.
Return:
A lesson.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> report =
"What happened: I tried to search for AMD's latest product using the wikisearch tool,
but no information was available in the search results.
Cause and effect relationships:
1. Searching \"AMD latest product\" -> No info available.
2. Searching \"most recent product release\" -> No info available.
3. Searching \"latest product\" -> No info available.
Analysis of each relationship:
1. The search for \"AMD latest product\" did not provide any information because the wikisearch tool could not find relevant results for that query.
2. The search for \"most recent product release\" also did not yield any results, indicating that there might be no recent product releases available or that the information is not accessible through the wikisearch tool.
3. The search for \"latest product\" similarly resulted in no information being found, suggesting that either the latest product is not listed on the encyclopedia or it is not easily identifiable using the wikisearch tool.
Improvements: To improve the response, I could try searching for AMD's products on a different
source or search engine to find the most recent product release. Additionally, I could ask
the user for more context or clarify their question to better understand what they are
looking for."
julia> lesson = selfReflext(agent, report)
```
"""
function selfReflext(a, analysis::T) where {T<:AbstractString}
prompt =
"""
<|im_start|>system
You have access to the following tools:
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
wikisearch: Useful for when you need to search an encyclopedia. Input is keywords, not a question.
Your report:
$analysis
Your jobs are:
1. Lesson: what lesson could you learn from your report?
2. Context: what is the context this lesson could apply to?
<|im_end|>
"""
respond = sendReceivePrompt(a, prompt, max_tokens=2048)
return respond
end
""" Formulate a respond from work for user's stimulus.
Args:
a, one of ChatAgent's agent.
Return:
A response to the user's stimulus.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory = OrderedDict{String, Any}(
"user:" => "What's the latest AMD GPU?",
"Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
"Act 1:" => " wikisearch\n",
"ActInput 1:" => " amd gpu latest\n",
"Obs 1:" => "No info available for your search query.",
"Act 2:" => " wikisearch\n",
"ActInput 2:" => " amd graphics card latest\n",
"Obs 2:" => "No info available for your search query.")
julia> agent.memory[:shortterm] = shorttermMemory
julia> respond = formulateUserRespond(agent)
```
"""
function formulateUserRespond(a)
stimulus = a.memory[:shortterm]["user:"]
work = shortMemoryToString(a.memory[:shortterm], ["user:"])
prompt =
"""
<|im_start|>system
Symbol:
Stimulus: the input the user gives you, which you must respond to
Plan: a plan
Thought: your thought
Act: the action you took
ActInput: the input to the action
Obs: the result of the action
Stimulus:
$stimulus
Your work:
$work
From your work, formulate a response to the user's stimulus.
<|im_end|>
Respond:
"""
respond = sendReceivePrompt(a, prompt)
return respond
end
""" Determine whether LLM should go to next step.
Args:
a, one of ChatAgent's agent.
Return:
"Yes" or "no" decision to go next step.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory = OrderedDict{String, Any}(
"user:" => "What's the latest AMD GPU?",
"Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
"Act 1:" => " wikisearch\n",
"ActInput 1:" => " amd gpu latest\n",
"Obs 1:" => "No info available for your search query.",
"Act 2:" => " wikisearch\n",
"ActInput 2:" => " amd graphics card latest\n",
"Obs 2:" => "No info available for your search query.")
julia> decision = goNogo(agent)
"Yes"
```
""" #BUG sometime AI ready to formulate respond before all step are completed
function goNogo(a)
stimulus = a.memory[:shortterm]["user:"]
work = shortMemoryToString(a.memory[:shortterm], ["user:"])
# prompt =
# """
# <|im_start|>system
# Symbol meaning:
# Stimulus: the input user gives to you and you must respond
# Plan: a plan
# Thought: your thought
# Act: the action you took
# ActInput: the input to the action
# Obs: the result of the action
# Stimulus:
# $stimulus
# Your work:
# $work
# From your work, you job is to decide what to do next by choosing one of the following choices:
# If you are ready to do the next step of the plan say, "{Yes}". And what is the rationale behind the decision?
# If you need to repeat the latest step say, "{No}". And what is the rationale behind the decision?
# If you are ready to formulate a final respond to user original stimulus say, {formulateUserRespond}. And what is the rationale behind the decision?
# <|im_end|>
# """
prompt =
"""
<|im_start|>system
Symbol meaning:
Stimulus: the input the user gives you, which you must respond to
Plan: a plan
Thought: your thought
Act: the action you took
ActInput: the input to the action
Obs: the result of the action
Stimulus:
$stimulus
Your work:
$work
Your job is to check whether step $(a.step) of your work is completed according to the plan and choose only one of the following choices.
choice 1: If you are ready to do the next step of the plan say, "{Yes}". And what is the rationale behind the decision?
choice 2: If you need to repeat the latest step say, "{No}". And what is the rationale behind the decision?
<|im_end|>
<|im_start|>assistant
"""
respond = sendReceivePrompt(a, prompt)
println("")
goNogo_raw = respond
@show goNogo_raw
decision = nothing
reason = nothing
if occursin("Yes", respond)
decision = "Yes"
elseif occursin("No", respond)
decision = "No"
elseif occursin("formulateUserRespond", respond)
decision = "formulateUserRespond"
else
error("undefied condition, decision $decision $(@__LINE__)")
end
startInd = findfirst(decision, respond)[end] + 2
if occursin(":", respond[startInd:end]) # check for ":" after the decision word
startInd2 = findnext(":", respond, startInd)[end]+1
reason = respond[startInd2:end]
else
reason = respond[startInd:end]
end
return decision, reason
end
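# Note: the occursin checks above can misfire when a respond contains several of
# the keywords. A stricter sketch anchored on the braced tokens the prompt asks
# for (assumption: the LLM echoes "{Yes}" / "{No}" literally):
#   decision = occursin("{Yes}", respond) ? "Yes" :
#              occursin("{No}", respond) ? "No" : nothing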
end # module