# ChatAgent/src/interface.jl
module interface
export agentReact, agentReflex,
addNewMessage, clearMessage, removeLatestMsg, conversation, writeEvaluationGuidlines,
grading, analyze, selfReflext
using JSON3, DataStructures, Dates, UUIDs, HTTP
using CommUtils, GeneralUtils
using ..type, ..utils
# ---------------------------------------------------------------------------- #
# pythoncall setting #
# ---------------------------------------------------------------------------- #
# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252
# by setting the following variables, PythonCall will use system python or conda python and
# packages installed by system or conda
# if these settings are not set (commented out), PythonCall will use its own python and packages that
# installed by CondaPkg (from env_preparation.jl)
# ENV["JULIA_CONDAPKG_BACKEND"] = "Null"
# systemPython = split(read(`which python`, String), "\n")[1]
# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with $> which python ex. raw"/root/conda/bin/python"
# using PythonCall
# const py_agents = PythonCall.pynew()
# const py_llms = PythonCall.pynew()
# function __init__()
# # PythonCall.pycopy!(py_cv2, pyimport("cv2"))
# # equivalent to from urllib.request import urlopen in python
# PythonCall.pycopy!(py_agents, pyimport("langchain.agents"))
# PythonCall.pycopy!(py_llms, pyimport("langchain.llms"))
# end
#------------------------------------------------------------------------------------------------100
""" Add a new message to the agent's message list.
Args:
a, one of ChatAgent's agents.
role, the speaker role; must be listed in `a.availableRole`.
content, the message text.
Return:
The number of user messages left before `a.maxUserMsg` is reached. When the
limit is already exceeded the conversation is cleared (the system
instruction is kept) and the full budget is returned.
```jldoctest
julia> addNewMessage(agent1, "user", "Where should I go to buy snacks")
```
"""
function addNewMessage(a::T1, role::String, content::T2) where {T1<:agent, T2<:AbstractString}
if role ∉ a.availableRole # guard against typo (BUGFIX: membership operator was missing)
error("role is not in agent.availableRole")
end
# check whether user messages exceed limit
userMsg = 0
for i in a.messages
if i[:role] == "user"
userMsg += 1
end
end
messageleft = 0
if userMsg > a.maxUserMsg # delete all conversation
clearMessage(a)
messageleft = a.maxUserMsg
else
userMsg += 1
d = Dict(:role=> role, :content=> content, :timestamp=> Dates.now())
push!(a.messages, d)
messageleft = a.maxUserMsg - userMsg
end
return messageleft
end
"""
Delete every stored message except the first one (the system instruction),
then reset the agent's running thought to the "nothing" sentinel.
"""
function clearMessage(a::T) where {T<:agent}
while length(a.messages) > 1 # system instruction will NOT be deleted
pop!(a.messages)
end
a.thought = "nothing"
end
"""
Drop the most recent message and return it, unless only the system
instruction remains (then return `nothing`).
"""
function removeLatestMsg(a::T) where {T<:agent}
length(a.messages) > 1 || return nothing
return pop!(a.messages)
end
# function generatePrompt_mistral_openorca(a::T, usermsg::String, role::Symbol) where {T<:agent}
# prompt =
# """
# <|im_start|>system
# {systemMsg}
# <|im_end|>
# Here are the context for the question:
# {context}
# """
# prompt = replace(prompt, "{systemMsg}" => a.roles[role])
# toolnames = ""
# toollines = ""
# for (toolname, v) in a.tools
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
# toollines *= toolline
# toolnames *= "$toolname,"
# end
# prompt = replace(prompt, "{toolnames}" => toolnames)
# prompt = replace(prompt, "{tools}" => toollines)
# prompt = replace(prompt, "{context}" => a.context)
# prompt *= "<|im_start|>user\n" * usermsg * "\n<|im_end|>\n"
# prompt *= "<|im_start|>assistant\n"
# return prompt
# end
# function generatePrompt_mistral_openorca(a::T, usermsg::String,
# thinkingMode::Symbol=:nothinking) where {T<:agent}
# prompt =
# """
# <|im_start|>system
# {systemMsg}
# You have access to the following tools:
# {tools}
# {thinkingMode}
# <|im_end|>
# Here are the context for the question:
# {context}
# """
# prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
# prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
# toolnames = ""
# toollines = ""
# for (toolname, v) in a.tools
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
# toollines *= toolline
# toolnames *= "$toolname,"
# end
# prompt = replace(prompt, "{toolnames}" => toolnames)
# prompt = replace(prompt, "{tools}" => toollines)
# prompt = replace(prompt, "{context}" => a.context)
# prompt *= "<|im_start|>user\nQuestion: " * usermsg * "\n<|im_end|>\n"
# prompt *= "<|im_start|>assistant\n"
# return prompt
# end
"""
Build a mistral-openorca chat-format prompt for a generic `agent`, filling in
role, tool descriptions, thinking-mode scaffold and context, and appending the
user's stimulus. Returns the prompt `String`; does not call the model.
"""
function generatePrompt_mistral_openorca(a::T, usermsg::String,
thinkingMode::Symbol=:nothinking) where {T<:agent}
prompt =
"""
<|im_start|>system
{systemMsg}
{tools}
{thinkingMode}
<|im_end|>
Here are the context for the stimulus:
{context}
"""
prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
# one descriptive line per registered tool
toollines = ""
for (toolname, v) in a.tools
toollines *= "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
end
# BUGFIX: "{tools}" was never substituted (only a no-op "{toolnames}" replace
# existed), leaving the literal placeholder in every prompt. Fill it in the
# same way planner_/actor_mistral_openorca do.
prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
prompt = replace(prompt, "{context}" => a.context)
prompt *= "<|im_start|>user\nStimulus: " * usermsg * "\n<|im_end|>\n"
prompt *= "<|im_start|>assistant\n"
return prompt
end
"""
Build a plain chat prompt (no tools, no thinking scaffold) for an `agentReflex`
in mistral-openorca chat format. Returns the prompt `String`; does not call
the model.
"""
function chat_mistral_openorca(a::agentReflex, usermsg::String)
# NOTE(review): the triple-quoted string below is an unused literal inside the
# body (not a docstring); kept as inline documentation of the full template.
"""
general prompt format:
"
<|im_start|>system
{role}
{tools}
{thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"
Note:
{context} =
"
{earlierConversation}
{env state}
{shortterm memory}
{longterm memory}
"
"""
prompt =
"""
<|im_start|>system
{role}
{thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => "") # plain chat: no thinking scaffold
context =
"""
{earlierConversation}
{env state}
{longterm memory}
"""
context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "") # env state not wired up yet
context = replace(context, "{longterm memory}" => "") # longterm memory not wired up yet
prompt = replace(prompt, "{context}" => context)
prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg")
return prompt
end
"""
Build the planner prompt for an `agentReflex` in mistral-openorca chat format:
role, tool descriptions, the planner thinking format, and earlier-conversation
context, followed by the user's stimulus. Returns the prompt `String`; does
not call the model.

General template:
<|im_start|>system
{role} {tools} {thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
where {context} = earlier conversation + env state + longterm memory.
"""
function planner_mistral_openorca(a::agentReflex, usermsg::String)
prompt =
"""
<|im_start|>system
{role}
{tools}
{thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner])
# one descriptive line per registered tool
# (dead "toolnames" accumulation and the no-op "{toolnames}" replace removed:
# the template contains no {toolnames} placeholder)
toollines = ""
for (toolname, v) in a.tools
toollines *= "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
end
prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
context =
"""
{earlierConversation}
{env state}
{longterm memory}
"""
context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "") # env state not wired up yet
context = replace(context, "{longterm memory}" => "") # longterm memory not wired up yet
prompt = replace(prompt, "{context}" => context)
prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg")
return prompt
end
"""
Build the actor prompt for an `agentReflex` in mistral-openorca chat format
for one plan step (`usermsg` is the step detail). Returns the prompt `String`;
does not call the model.

General template:
<|im_start|>system
{role} {tools} {thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
where {context} = env state + longterm memory (earlier conversation is
intentionally left out for the actor).
"""
function actor_mistral_openorca(a::agentReflex, usermsg::T) where {T<:AbstractString}
prompt =
"""
<|im_start|>system
{role}
{tools}
{thinkingFormat}
<|im_end|>
{context}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:actor])
# one descriptive line per registered tool
# (dead "toolnames" accumulation and the no-op "{toolnames}" replace removed:
# the template contains no {toolnames} placeholder)
toollines = ""
for (toolname, v) in a.tools
toollines *= "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
end
prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
context =
"""
{env state}
{longterm memory}
"""
context = replace(context, "{env state}" => "") # env state not wired up yet
context = replace(context, "{longterm memory}" => "") # longterm memory not wired up yet
prompt = replace(prompt, "{context}" => context)
prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg")
return prompt
end
"""
Chat with llm.
```jldoctest
julia> using JSON3, UUIDs, Dates, FileIO, CommUtils, ChatAgent
julia> mqttClientSpec = (
clientName= "someclient", # name of this client
clientID= "$(uuid4())",
broker= "mqtt.yiem.ai",
pubtopic= (imgAI="img/api/v0.0.1/gpu/request",
txtAI="txt/api/v0.1.0/gpu/request"),
subtopic= (imgAI="agent/api/v0.1.0/img/respond",
txtAI="agent/api/v0.1.0/txt/respond"),
keepalive= 30,
)
julia> msgMeta = Dict(
:msgPurpose=> "updateStatus",
:from=> "agent",
:to=> "llmAI",
:requestrespond=> "request",
:sendto=> "", # destination topic
:replyTo=> "agent/api/v0.1.0/txt/respond", # requester ask responder to send reply to this topic
:repondToMsgId=> "", # responder is responding to this msg id
:taskstatus=> "", # "complete", "fail", "waiting" or other status
:timestamp=> Dates.now(),
:msgId=> "$(uuid4())",
)
julia> newAgent = ChatAgent.agentReact(
"Jene",
mqttClientSpec,
role=:assistant_react,
msgMeta=msgMeta
)
julia> respond = ChatAgent.conversation(newAgent, "Hi! how are you?")
```
"""
function conversation(a::T, usermsg::String) where {T<:agent}
respond = nothing
if a.thought != "nothing" # continue thought
# an ongoing chain-of-thought exists: feed the user's message back in as
# the pending observation and resume thinking via work()
_ = addNewMessage(a, "user", usermsg)
a.thought *= "Obs $(a.thinkinground): $usermsg\n"
prompt = a.thought
respond = work(a, prompt)
else # new thought
thinkingmode = chooseThinkingMode(a, usermsg)
@show thinkingmode
# NOTE(review): the symbols checked here (:no_thinking / :thinking) differ
# from the :nothinking default in generatePrompt_mistral_openorca — confirm
# chooseThinkingMode and a.thinkingMode use matching keys.
if thinkingmode == :no_thinking
a.context = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
_ = addNewMessage(a, "user", usermsg)
prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
@show prompt
respond = sendReceivePrompt(a, prompt)
respond = split(respond, "<|im_end|>")[1] # keep only text before the end token
respond = replace(respond, "\n" => "")
_ = addNewMessage(a, "assistant", respond)
@show respond
elseif thinkingmode == :thinking
a.context = conversationSummary(a)
_ = addNewMessage(a, "user", usermsg)
prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
respond = work(a, prompt)
else
error("undefined condition thinkingmode = $thinkingmode")
end
end
return respond
end
"""
Continuously run llm functions except when llm is getting Answer: or chatbox.
There are many work() depend on thinking mode.

The agent's `thought` accumulates the Thought/Act/ActInput/Obs transcript; the
loop ends when the model emits `Answer:` without a further `Act`, or when a
tool call targets `chatbox` (message for the user).
"""
function work(a::T, prompt::String, maxround::Int=3) where {T<:agent}
# NOTE(review): `maxround` is currently unused; the loop is bounded by
# `a.thinkingroundlimit` instead. Kept for interface compatibility.
respond = nothing
while true
a.thinkinground += 1
@show a.thinkinground
toolname = nothing
toolinput = nothing
if a.thinkinground > a.thinkingroundlimit
# round budget spent: nudge the model to produce a final answer
a.thought *= "Thought $(a.thinkinground): I think I know the answer."
prompt = a.thought
end
@show prompt
respond = sendReceivePrompt(a, prompt)
headerToDetect = nothing
if a.thinkinground == 1
try
respond = split(respond, "Obs:")[1]
headerToDetect = ["Question:", "Plan:", "Thought:", "Act:", "ActInput:", "Obs:", "...", "Answer:",
"Conclusion:", "Summary:"]
catch
end
else
try
respond = split(respond, "Obs $(a.thinkinground):")[1]
headerToDetect = ["Question $(a.thinkinground):", "Plan $(a.thinkinground):",
"Thought $(a.thinkinground):", "Act $(a.thinkinground):",
"ActInput $(a.thinkinground):", "Obs $(a.thinkinground):",
"...", "Answer:",
"Conclusion:", "Summary:"]
catch
end
end
@show respond
headers = detectCharacters(respond, headerToDetect)
chunkedtext = chunktext(respond, headers)
Answer = findDetectedCharacter(headers, "Answer:")
AnswerInd = length(Answer) != 0 ? Answer[1] : nothing
Act = findDetectedCharacter(headers, "Act $(a.thinkinground):")
if length(Answer) == 1 && length(Act) == 0
# final answer reached: reset thinking state and return the answer
a.thought = "nothing" # assignment finished, no more thought
a.context = "nothing"
a.thinkinground = 0
respond = chunkedtext[AnswerInd][:body]
respond = replace(respond, "<|im_end|>"=>"")
_ = addNewMessage(a, "assistant", respond)
break
else
# check for tool being called
ActHeader = a.thinkinground == 1 ? "Act:" : "Act $(a.thinkinground):"
if length(findDetectedCharacter(headers, ActHeader)) != 0 # check whether there is Act: in a respond
ActInd = findDetectedCharacter(headers, ActHeader)[1]
toolname = toolNameBeingCalled(chunkedtext[ActInd][:body], a.tools)
end
ActInputHeader = a.thinkinground == 1 ? "ActInput:" : "ActInput $(a.thinkinground):"
if length(findDetectedCharacter(headers, ActInputHeader)) != 0 # check whether there is ActInput: in a respond
ActInputInd = findDetectedCharacter(headers, ActInputHeader)[1]
toolinput = chunkedtext[ActInputInd][:body]
end
# BUGFIX: bail out BEFORE the clean-up below; `occursin` on a `nothing`
# toolinput raised a MethodError instead of retrying the round.
if toolname === nothing || toolinput === nothing
println("toolname $toolname toolinput $toolinput retry thinking")
a.thinkinground -= 1
continue
end
# clean up: strip quoting / surrounding text around the tool input
if occursin(" \"", toolinput)
toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " \"", "\"\n")
else
toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " ", "\n")
end
@show toolname
@show toolinput
# clean-up may itself fail and hand back `nothing`; retry in that case too
if toolinput === nothing
println("toolname $toolname toolinput $toolinput retry thinking")
a.thinkinground -= 1
continue
end
if a.thought == "nothing"
# first round: seed the thought with the numbered transcript
thought = ""
for i in chunkedtext
header = i[:header]
header = replace(header, ":"=>" $(a.thinkinground):") # add number so that llm not confused
body = i[:body]
thought *= "$header $body"
end
a.thought = prompt * thought
else
a.thought *= respond
end
if toolname == "chatbox" # chat with user
a.thought *= toolinput
respond = toolinput
_ = addNewMessage(a, "assistant", respond)
break
else # function call
f = a.tools[Symbol(toolname)][:func]
_result = f(toolinput)
if _result != "No info available." #TODO for use with wikisearch(). Not good for other tools
_result = makeSummary(a, _result)
end
result = "Obs $(a.thinkinground): $_result\n"
a.thought *= result
prompt = a.thought
end
end
end
@show respond
return respond
end
"""
Chat entry point for an `agentReflex`: choose a thinking mode, then either
answer directly (`:no_thinking`) or delegate to the plan/act `work` loop.
Returns the assistant's reply.
"""
function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)
a.attemptlimit = attemptlimit # remember the retry budget for the work loop
respond = nothing
# determine thinking mode
a.thinkingmode = chooseThinkingMode(a, usermsg)
@show a.thinkingmode
if a.thinkingmode == :no_thinking
a.earlierConversation = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
_ = addNewMessage(a, "user", usermsg)
prompt = chat_mistral_openorca(a, usermsg) #TODO rewrite this function
@show prompt
respond = sendReceivePrompt(a, prompt)
respond = split(respond, "<|im_end|>")[1] # keep only text before the end token
respond = replace(respond, "\n" => "")
_ = addNewMessage(a, "assistant", respond)
@show respond
else
respond = work(a, usermsg)
end
return respond
end
"""
Work loop for an `agentReflex`: plan with the planner prompt, then act on the
plan via `actor`. Returns the message for the user once the actor reaches the
`chatbox` tool. NOTE(review): work in progress — the evaluate phase and the
attempt-limit handling are not implemented yet, and the `:continue_thinking`
branch is a debug stop.
"""
function work(a::agentReflex, usermsg::String)
if a.thinkingmode == :new_thinking
a.earlierConversation = conversationSummary(a)
_ = addNewMessage(a, "user", usermsg)
elseif a.thinkingmode == :continue_thinking
error("continue_thinking") # debug stop: branch not implemented yet
_ = addNewMessage(a, "user", usermsg)
a.thought *= "Obs $(a.attempt): $usermsg\n"
else
# BUGFIX: interpolate the agent field; a bare `thinkingmode` local does not
# exist here and raised UndefVarError instead of this diagnostic message.
error("undefined condition thinkingmode = $(a.thinkingmode)")
end
while true # Work loop
# plan
a.attempt += 1
#TODO enforce `a.attemptlimit` here (empty if/else placeholder removed)
@show a.attempt
@show usermsg
logmsg = "user: $usermsg\n"
a.memory[:shortterm] *= logmsg
toolname = nothing
toolinput = nothing
prompt = planner_mistral_openorca(a, usermsg)
@show prompt
respond = sendReceivePrompt(a, prompt)
plan = split(respond, "<|im_end|>")[1]
plan = split(plan, "Response:")[1]
_plan = replace(plan, "Plan:"=>"Plan $(a.attempt):")
logmsg = "assistant: $_plan\n"
a.memory[:shortterm] *= logmsg
actorstate, msgToUser = actor(a, plan)
#WORKING if actorstate == "chatbox" break work loop and get back to user
if actorstate == "chatbox"
return msgToUser
end
# evaluate
end
end
"""
Actor loop for an `agentReflex`: execute `plan` one step at a time, calling
the tool the model requests at each step and logging to short-term memory.

Args:
a, the reflex agent.
plan, a numbered plan produced by the planner.
Return:
(actorState, msgToUser) where actorState describes how the loop ended and
msgToUser is the reply for the user (or `nothing`).
NOTE(review): work in progress — the unconditional `error("actor done …")`
calls are debug stops, so this function currently always throws before
returning normally.
"""
function actor(a::agentReflex, plan::T) where {T<:AbstractString}
actorState = nothing
@show plan
totalsteps = checkTotalStepInPlan(a, plan)
msgToUser = nothing
a.step = 0
while true # Actor loop
a.step += 1
@show a.step
if a.step <= totalsteps
stepdetail = extractStepFromPlan(a, plan, a.step)
prompt = actor_mistral_openorca(a, stepdetail)
@show prompt
respond = sendReceivePrompt(a, prompt)
respond = split(respond, "<|im_end|>")[1]
@show respond
headerToDetect = ["Question:", "Plan:", "Thought:", "Act:", "ActInput:", "Obs:", "...", "Answer:",
"Conclusion:", "Summary:"]
headers = detectCharacters(respond, headerToDetect)
# add to memory
_respond = addStepNumber(respond, headers, a.step)
a.memory[:shortterm] *= _respond
chunkedtext = chunktext(respond, headers)
# NOTE(review): chunkedtext is indexed by header string here, while work()
# indexes it by position then [:body] — confirm chunktext supports both.
toolname = toolNameBeingCalled(chunkedtext["Act:"], a.tools)
toolinput = chunkedtext["ActInput:"]
@show toolname
@show toolinput
#WORKING
if toolname == "chatbox" # chat with user
respond = toolinput
_ = addNewMessage(a, "assistant", respond)
msgToUser = respond
actorState = toolname
error("actor done 0") # debug stop
break
else # function call
f = a.tools[Symbol(toolname)][:func]
result = f(toolinput)
result = "\nObs $(a.step): $result\n"
a.memory[:shortterm] *= result
end
else #TODO finish all steps
actorState = "all steps done"
error("actor done 2") # debug stop
break
end
end
error("actor done 3") # debug stop; makes the return below unreachable
return actorState, msgToUser
end
""" Write evaluation guideline.
Args:
a, one of ChatAgent's agent.
shorttermMemory, a short term memory that logs what happened.
Return:
An evaluation guideline used to gauge AI's work.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory =
"
user: What's AMD latest product?
assistant: Plan 1: To provide the user with information about AMD's latest product, I will search for the most recent product release from AMD.
1. Search for \"AMD latest product\" using wikisearch tool.
2. Identify the most recent product release mentioned in the search results.
3. Provide the user with the name of the latest product.
Thought 1: The user wants to know about the latest AMD products, so I should use the wikisearch tool to find information on this topic.
Act 1: wikisearch
ActInput 1: \"AMD latest product\"
Obs 1: No info available.
"
julia> evaluationGuideLine = ChatAgent.writeEvaluationGuidlines(agent, shorttermMemory)
```
"""
function writeEvaluationGuidlines(a::agentReflex, shorttermMemory::T) where {T<:AbstractString}
# single system-prompt round-trip: ask the model to write its own rubric
prompt =
"""
<|im_start|>system
You have access to the following tools:
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
Your work:
$shorttermMemory
Your job are:
1. Write an evaluation guideline for your work in order to be able to evaluate your respond.
2. What the respond should be?
<|im_end|>
"""
respond = sendReceivePrompt(a, prompt)
return respond
end
""" Determine a score out of 10 according to evaluation guideline.
Args:
a, one of ChatAgent's agent.
guidelines, an evaluation guideline.
shorttermMemory, a short term memory that logs what happened.
Return:
A score out of 10 based on guideline. Throws if no "Score {}/10" pattern
can be found in the model's respond.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory =
"
user: What's AMD latest product?
assistant: Plan 1: To provide the user with information about AMD's latest product, I will search for the most recent product release from AMD.
1. Search for \"AMD latest product\" using wikisearch tool.
2. Identify the most recent product release mentioned in the search results.
3. Provide the user with the name of the latest product.
Thought 1: The user wants to know about the latest AMD products, so I should use the wikisearch tool to find information on this topic.
Act 1: wikisearch
ActInput 1: \"AMD latest product\"
Obs 1: No info available."
julia> guideline = "\nEvaluation Guideline:\n1. Check if the user's question has been understood correctly.\n2. Evaluate the steps taken to provide the information requested by the user.\n3. Assess whether the correct tools were used for the task.\n4. Determine if the user's request was successfully fulfilled.\n5. Identify any potential improvements or alternative approaches that could be used in the future.\n\nThe respond should include:\n1. A clear understanding of the user's question.\n2. The steps taken to provide the information requested by the user.\n3. An evaluation of whether the correct tools were used for the task.\n4. A confirmation or explanation if the user's request was successfully fulfilled.\n5. Any potential improvements or alternative approaches that could be used in the future."
julia> score = grading(agent, guideline, shorttermMemory)
2
```
"""
function grading(a, guidelines::T, shorttermMemory::T) where {T<:AbstractString}
prompt =
"""
<|im_start|>system
You have access to the following tools:
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
Evaluation guidelines:
$guidelines
Your work:
$shorttermMemory
You job are:
1. Evaluate your work using the evaluation guidelines.
2. Give yourself a score out of 10 for your work.
Use the following format to answer: Score {}/10.
<|im_end|>
"""
respond = sendReceivePrompt(a, prompt)
# BUGFIX: the previous fixed byte-offset slice respond[end-4:end-3] broke on
# scores of 10, trailing punctuation/tokens, and multibyte characters.
# Extract the score with a pattern match instead.
m = match(r"[Ss]core[^0-9]*([0-9]+)\s*/\s*10", respond)
m === nothing && error("could not find a score of the form \"Score {}/10\" in respond: $respond")
score = parse(Int, m.captures[1])
return score
end
""" Analyze work.
Args:
a, one of ChatAgent's agent.
shorttermMemory, a short term memory that logs what happened.
Return:
A report of analyzed work.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory =
"
user: What's AMD latest product?
assistant: Plan 1: To provide the user with information about AMD's latest product, I will search for the most recent product release from AMD.
1. Search for \"AMD latest product\" using wikisearch tool.
2. Identify the most recent product release mentioned in the search results.
3. Provide the user with the name of the latest product.
Thought 1: The user wants to know about the latest AMD products, so I should use the wikisearch tool to find information on this topic.
Act 1: wikisearch
ActInput 1: \"AMD latest product\"
Obs 1: No info available."
julia> report = analyze(agent, shorttermMemory)
```
"""
function analyze(a, shorttermMemory::T) where {T<:AbstractString}
# single system-prompt round-trip: ask the model for a cause/effect analysis
prompt =
"""
<|im_start|>system
You have access to the following tools:
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
Your work:
$shorttermMemory
Use the following steps to analize your work in detail.
1. What happened?
2. List all cause and effect relationships.
3. Analyze each relationship to figure it out why it behaved that way.
4. What could you do to improve the respond?
<|im_end|>
"""
respond = sendReceivePrompt(a, prompt)
return respond
end
""" Write a lesson drawn from evaluation.
Args:
a, one of ChatAgent's agent.
report, a report resulted from analyzing shorttermMemory
Return:
A lesson.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> report =
"What happened: I tried to search for AMD's latest product using the wikisearch tool,
but no information was available in the search results.
Cause and effect relationships:
1. Searching \"AMD latest product\" -> No info available.
2. Searching \"most recent product release\" -> No info available.
3. Searching \"latest product\" -> No info available.
Analysis of each relationship:
1. The search for \"AMD latest product\" did not provide any information because the wikisearch tool could not find relevant results for that query.
2. The search for \"most recent product release\" also did not yield any results, indicating that there might be no recent product releases available or that the information is not accessible through the wikisearch tool.
3. The search for \"latest product\" similarly resulted in no information being found, suggesting that either the latest product is not listed on the encyclopedia or it is not easily identifiable using the wikisearch tool.
Improvements: To improve the response, I could try searching for AMD's products on a different
source or search engine to find the most recent product release. Additionally, I could ask
the user for more context or clarify their question to better understand what they are
looking for."
julia> lesson = selfReflext(agent, report)
```
"""
function selfReflext(a, report::T) where {T<:AbstractString}
# single system-prompt round-trip: ask the model to distil a lesson from report
prompt =
"""
<|im_start|>system
You have access to the following tools:
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
Your report:
$report
What lesson could be drawn from your report?.
<|im_end|>
"""
respond = sendReceivePrompt(a, prompt)
return respond
end
end # module