module interface

export agentReact, agentReflex,
    addNewMessage, clearMessage, removeLatestMsg, conversation, writeEvaluationGuideline,
    grading, analyze, selfReflext, actor_mistral_openorca, formulateUserRespond

using JSON3, DataStructures, Dates, UUIDs, HTTP
using CommUtils, GeneralUtils
using ..type, ..utils

# ---------------------------------------------------------------------------- #
#                              PythonCall setting                               #
# ---------------------------------------------------------------------------- #
# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252
# By setting the following variables, PythonCall will use the system or conda python
# and the packages installed by the system or conda.
# If these settings are left unset (commented out), PythonCall will use its own python
# and the packages installed by CondaPkg (from env_preparation.jl).
# ENV["JULIA_CONDAPKG_BACKEND"] = "Null"
# systemPython = split(read(`which python`, String), "\n")[1]
# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with $> which python ex. raw"/root/conda/bin/python"

# using PythonCall
# const py_agents = PythonCall.pynew()
# const py_llms = PythonCall.pynew()
# function __init__()
#     # PythonCall.pycopy!(py_cv2, pyimport("cv2"))

#     # equivalent to `from langchain import agents, llms` in python
#     PythonCall.pycopy!(py_agents, pyimport("langchain.agents"))
#     PythonCall.pycopy!(py_llms, pyimport("langchain.llms"))
# end

#------------------------------------------------------------------------------------------------100

""" Add new message to agent.
|
|
|
|
Args:
|
|
|
|
Return:
|
|
|
|
```jldoctest
|
|
julia> addNewMessage(agent1, "user", "Where should I go to buy snacks")
|
|
````
|
|
"""
|
|
function addNewMessage(a::T1, role::String, content::T2) where {T1<:agent, T2<:AbstractString}
    if role ∉ a.availableRole # guard against typo
        error("role is not in agent.availableRole $(@__LINE__)")
    end

    # check whether user messages exceed the limit
    userMsg = 0
    for i in a.messages
        if i[:role] == "user"
            userMsg += 1
        end
    end
    messageleft = 0

    if userMsg > a.maxUserMsg # limit reached: delete the whole conversation (the new message is not stored)
        clearMessage(a)
        messageleft = a.maxUserMsg
    else
        userMsg += 1
        d = Dict(:role=> role, :content=> content, :timestamp=> Dates.now())
        push!(a.messages, d)
        messageleft = a.maxUserMsg - userMsg
    end

    return messageleft
end

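# Illustrative sketch (not executed; assumes an agent `a` with maxUserMsg = 2 and
# "user" in a.availableRole). Each call returns how many user messages remain
# before the history is cleared:
#
#   addNewMessage(a, "user", "first")   # -> 1
#   addNewMessage(a, "user", "second")  # -> 0
#   addNewMessage(a, "user", "third")   # -> -1 (2 existing is not > maxUserMsg, so it still pushes)
#   addNewMessage(a, "user", "fourth")  # 3 > maxUserMsg: clearMessage(a) runs, the new
#                                       # message is dropped, and 2 is returned
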
function clearMessage(a::T) where {T<:agent}
    # pop messages until only the system instruction remains
    while length(a.messages) > 1 # system instruction will NOT be deleted
        pop!(a.messages)
    end
    a.memory[:shortterm] = OrderedDict{String, Any}()
    a.memory[:log] = OrderedDict{String, Any}()
end

function removeLatestMsg(a::T) where {T<:agent}
    if length(a.messages) > 1
        pop!(a.messages)
    end
end

# function generatePrompt_mistral_openorca(a::T, usermsg::String, role::Symbol) where {T<:agent}
#     prompt =
#     """
#     <|im_start|>system
#     {systemMsg}
#     <|im_end|>
#     Here is the context for the question:
#     {context}
#     """
#     prompt = replace(prompt, "{systemMsg}" => a.roles[role])

#     toolnames = ""
#     toollines = ""
#     for (toolname, v) in a.tools
#         toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
#         toollines *= toolline
#         toolnames *= "$toolname,"
#     end
#     prompt = replace(prompt, "{toolnames}" => toolnames)
#     prompt = replace(prompt, "{tools}" => toollines)

#     prompt = replace(prompt, "{context}" => a.context)

#     prompt *= "<|im_start|>user\n" * usermsg * "\n<|im_end|>\n"
#     prompt *= "<|im_start|>assistant\n"

#     return prompt
# end

# function generatePrompt_mistral_openorca(a::T, usermsg::String,
#                                          thinkingMode::Symbol=:nothinking) where {T<:agent}
#     prompt =
#     """
#     <|im_start|>system
#     {systemMsg}
#     You have access to the following tools:
#     {tools}
#     {thinkingMode}
#     <|im_end|>
#     Here is the context for the question:
#     {context}
#     """
#     prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
#     prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
#     toolnames = ""
#     toollines = ""
#     for (toolname, v) in a.tools
#         toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
#         toollines *= toolline
#         toolnames *= "$toolname,"
#     end
#     prompt = replace(prompt, "{toolnames}" => toolnames)
#     prompt = replace(prompt, "{tools}" => toollines)

#     prompt = replace(prompt, "{context}" => a.context)

#     prompt *= "<|im_start|>user\nQuestion: " * usermsg * "\n<|im_end|>\n"
#     prompt *= "<|im_start|>assistant\n"

#     return prompt
# end

function generatePrompt_mistral_openorca(a::T, usermsg::String,
                                         thinkingMode::Symbol=:nothinking) where {T<:agent}

    prompt =
    """
    <|im_start|>system
    {systemMsg}
    {tools}
    {thinkingMode}
    <|im_end|>
    Here is the context for the stimulus:
    {context}
    """
    prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
    prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
    toolnames = ""
    toollines = ""
    for (toolname, v) in a.tools
        toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
        toollines *= toolline
        toolnames *= "$toolname,"
    end
    prompt = replace(prompt, "{toolnames}" => toolnames)
    prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")

    prompt = replace(prompt, "{context}" => a.context)

    prompt *= "<|im_start|>user\nStimulus: " * usermsg * "\n<|im_end|>\n"
    prompt *= "<|im_start|>assistant\n"

    return prompt
end

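# Illustrative sketch (not executed). Assuming `a.roles[a.role]` is "You are a helpful
# assistant.", `a.thinkingMode[:nothinking]` is empty, a single tool named `wikisearch`,
# and an empty `a.context`, the generated prompt looks roughly like:
#
#   <|im_start|>system
#   You are a helpful assistant.
#   You have access to the following tools:
#   wikisearch: <description> <input> <output>
#
#   <|im_end|>
#   Here is the context for the stimulus:
#
#   <|im_start|>user
#   Stimulus: Hi!
#   <|im_end|>
#   <|im_start|>assistant
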
function chat_mistral_openorca(a::agentReflex, usermsg::String)
    """
    general prompt format:

    "
    <|im_start|>system
    {role}
    {tools}
    {thinkingFormat}
    <|im_end|>
    {context}
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant

    "

    Note:
    {context} =
    "
    {earlierConversation}
    {env state}
    {shortterm memory}
    {longterm memory}
    "
    """

    prompt =
    """
    <|im_start|>system
    {role}
    {thinkingFormat}
    <|im_end|>
    {context}
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant

    """
    prompt = replace(prompt, "{role}" => a.roles[a.role])
    prompt = replace(prompt, "{thinkingFormat}" => "")

    context =
    """
    {earlierConversation}
    {env state}
    {longterm memory}
    """
    context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
    context = replace(context, "{env state}" => "")
    context = replace(context, "{longterm memory}" => "")

    prompt = replace(prompt, "{context}" => context)

    prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg")

    return prompt
end

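# Illustrative usage (not executed; `a` is an agentReflex already configured with
# roles and earlierConversation). This mirrors the :no_thinking branch of conversation():
#
#   prompt = chat_mistral_openorca(a, "Hi! how are you?")
#   respond = sendReceivePrompt(a, prompt)       # send to the LLM backend
#   answer = split(respond, "<|im_end|>")[1]     # strip the end-of-turn marker
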
function planner_mistral_openorca(a::agentReflex)
    """
    general prompt format:

    "
    <|im_start|>system
    {role}
    {tools}
    {thinkingFormat}
    <|im_end|>
    {context}
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant

    "

    Note:
    {context} =
    "
    {earlierConversation}
    {env state}
    {shortterm memory}
    {longterm memory}
    "
    """

    prompt =
    """
    <|im_start|>system
    {role}
    {roleSpecificKnowledge}
    {tools}
    {thinkingFormat}
    {context}
    <|im_end|>
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant
    Plan:
    """
    prompt = replace(prompt, "{role}" => a.roles[a.role])
    prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner])
    roleSpecificKnowledge =
    """
    Info you need from the user to be able to help them select their best wine:
    - type of food
    - occasion
    - user's personal taste in wine
    - wine price range
    - ambient temperature at the serving location
    - wines we have in stock
    Your job is to provide a personalized recommendation of up to two wines based on the user's info above, describing the benefits of each wine in detail.
    """
    prompt = replace(prompt, "{roleSpecificKnowledge}" => roleSpecificKnowledge)
    toolnames = ""
    toollines = ""
    for (toolname, v) in a.tools
        toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
        toollines *= toolline
        toolnames *= "$toolname,"
    end

    prompt = replace(prompt, "{toolnames}" => toolnames)
    prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")

    # prepare context
    context =
    """
    My earlier talk with the user:
    $(a.earlierConversation)

    My earlier experience:
    $(experience(a.memory[:longterm]))
    """

    prompt = replace(prompt, "{context}" => context)

    # the user's stimulus was stored in short-term memory by work()
    prompt = replace(prompt, "{usermsg}" => "Stimulus: $(a.memory[:shortterm]["user:"])")

    return prompt
end

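# Illustrative precondition (not executed): planner_mistral_openorca reads the
# stimulus from short-term memory, so work() seeds it first, roughly:
#
#   a.memory[:shortterm] = OrderedDict{String, Any}()
#   a.memory[:shortterm]["user:"] = "Which wine goes with grilled salmon?"
#   prompt_plan = planner_mistral_openorca(a)
#   plan = sendReceivePrompt(a, prompt_plan, max_tokens=1024)
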
function actor_mistral_openorca(a::agentReflex)
    """
    general prompt format:

    "
    <|im_start|>system
    {role}
    {tools}
    {thinkingFormat}
    <|im_end|>
    {context}
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant

    "

    Note:
    {context} =
    "
    {earlierConversation}
    {env state}
    {shortterm memory}
    {longterm memory}
    "
    """

    prompt =
    """
    <|im_start|>system
    {role}
    {tools}
    {thinkingFormat}
    {context}
    <|im_end|>
    {shorttermMemory}
    Thought $(a.step):
    """

    prompt = replace(prompt, "{role}" => a.roles[a.role])
    prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:actor])

    # render short-term memory as text; the "user:" and "Plan 1:" entries are handled separately
    s = dictToString(a.memory[:shortterm], ["user:", "Plan 1:"])
    prompt = replace(prompt, "{shorttermMemory}" => s)

    toolnames = ""
    toollines = ""
    for (toolname, v) in a.tools
        toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
        toollines *= toolline
        toolnames *= "$toolname, "
    end
    prompt = replace(prompt, "{toolnames}" => toolnames)
    prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")

    context =
    """
    {env state}
    {longterm memory}
    {plan}
    """
    # context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
    context = replace(context, "{env state}" => "")
    context = replace(context, "{longterm memory}" => "")
    context = replace(context, "{plan}" => "My plan:\n$(a.memory[:shortterm]["Plan $(a.attempt):"])")
    prompt = replace(prompt, "{context}" => context)

    return prompt
end

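# Illustrative sketch (not executed; dictToString comes from ..utils and is assumed
# to render the ordered dict as "key value" lines, skipping the listed keys). With
#
#   OrderedDict("user:" => "...", "Plan 1:" => "...", "Thought 1:" => "...", "Obs 1:" => "...")
#
# the {shorttermMemory} slot receives roughly:
#
#   Thought 1: ...
#   Obs 1: ...
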
"""
|
|
Chat with llm.
|
|
|
|
```jldoctest
|
|
julia> using JSON3, UUIDs, Dates, FileIO, CommUtils, ChatAgent
|
|
julia> mqttClientSpec = (
|
|
clientName= "someclient", # name of this client
|
|
clientID= "$(uuid4())",
|
|
broker= "mqtt.yiem.ai",
|
|
pubtopic= (imgAI="img/api/v0.0.1/gpu/request",
|
|
txtAI="txt/api/v0.1.0/gpu/request"),
|
|
subtopic= (imgAI="agent/api/v0.1.0/img/respond",
|
|
txtAI="agent/api/v0.1.0/txt/respond"),
|
|
keepalive= 30,
|
|
)
|
|
julia> msgMeta = Dict(
|
|
:msgPurpose=> "updateStatus",
|
|
:from=> "agent",
|
|
:to=> "llmAI",
|
|
:requestrespond=> "request",
|
|
:sendto=> "", # destination topic
|
|
:replyTo=> "agent/api/v0.1.0/txt/respond", # requester ask responder to send reply to this topic
|
|
:repondToMsgId=> "", # responder is responding to this msg id
|
|
:taskstatus=> "", # "complete", "fail", "waiting" or other status
|
|
:timestamp=> Dates.now(),
|
|
:msgId=> "$(uuid4())",
|
|
)
|
|
julia> newAgent = ChatAgent.agentReact(
|
|
"Jene",
|
|
mqttClientSpec,
|
|
role=:assistant_react,
|
|
msgMeta=msgMeta
|
|
)
|
|
julia> respond = ChatAgent.conversation(newAgent, "Hi! how are you?")
|
|
```
|
|
"""
|
|
function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)
    a.attemptlimit = attemptlimit
    respond = nothing

    a.earlierConversation = conversationSummary(a)

    # determine thinking mode
    a.thinkingmode = chooseThinkingMode(a, usermsg)
    @show a.thinkingmode

    if a.thinkingmode == :no_thinking
        _ = addNewMessage(a, "user", usermsg)
        prompt = chat_mistral_openorca(a, usermsg)
        println("")
        @show prompt
        respond = sendReceivePrompt(a, prompt)
        respond = split(respond, "<|im_end|>")[1]
        respond = replace(respond, "\n" => "")
        _ = addNewMessage(a, "assistant", respond)
        println("")
        @show respond
    else
        respond = work(a, usermsg)
    end

    return respond
end

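# Thinking-mode contract (inferred from the branches above and in work()):
# chooseThinkingMode(a, usermsg) is expected to return one of
#
#   :no_thinking        -> answer directly with chat_mistral_openorca
#   :new_thinking       -> start a fresh plan/act/evaluate cycle in work()
#   :continue_thinking  -> feed usermsg back into the running cycle as "Obs $(a.step):"
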
"""
|
|
Continuously run llm functions except when llm is getting Answer: or chatbox.
|
|
There are many work() depend on thinking mode.
|
|
"""
|
|
function work(a::agentReflex, usermsg::String)
|
|
respond = nothing
|
|
|
|
if a.thinkingmode == :new_thinking
|
|
_ = addNewMessage(a, "user", usermsg)
|
|
a.memory[:shortterm] = OrderedDict{String, Any}()
|
|
a.memory[:log] = OrderedDict{String, Any}()
|
|
a.memory[:shortterm]["user:"] = usermsg
|
|
a.memory[:log]["user:"] = usermsg
|
|
a.newplan = true
|
|
elseif a.thinkingmode == :continue_thinking
|
|
println("continue_thinking!!")
|
|
_ = addNewMessage(a, "user", usermsg)
|
|
a.memory[:shortterm]["Obs $(a.step):"] = usermsg
|
|
a.memory[:log]["Obs $(a.step):"] = usermsg
|
|
else
|
|
error("undefined condition thinkingmode = $thinkingmode $(@__LINE__)")
|
|
end
|
|
|
|
while true # Work loop
|
|
# plan
|
|
if a.attempt <= a.attemptlimit
|
|
toolname = nothing
|
|
toolinput = nothing
|
|
if a.newplan == true
|
|
a.attempt += 1
|
|
a.step = 0
|
|
prompt_plan = planner_mistral_openorca(a)
|
|
println("")
|
|
@show prompt_plan
|
|
respond = sendReceivePrompt(a, prompt_plan, max_tokens=1024)
|
|
|
|
# sometimes LLM add not-need word I don't want
|
|
plan = splittext(respond, ["Step 1", "<|im_end|>", "Response", "Execution",
|
|
"Result", "Recommendation", "My response"])
|
|
# plan = replace(plan, "Plan:"=>"")
|
|
println("")
|
|
@show plan
|
|
|
|
a.newplan = false
|
|
a.memory[:shortterm]["Plan $(a.attempt):"] = plan
|
|
a.memory[:log]["Plan $(a.attempt):"] = plan
|
|
end
|
|
|
|
println("")
|
|
@show a.attempt
|
|
|
|
# enter actor loop
|
|
actorstate, msgToUser = actor(a)
|
|
|
|
if actorstate == "chatbox"
|
|
respond = msgToUser
|
|
break
|
|
elseif actorstate == "all steps done" || actorstate == "formulateUserRespond"
|
|
println("all steps done")
|
|
|
|
respond = formulateUserRespond(a)
|
|
|
|
println("")
|
|
formulatedRespond = respond
|
|
@show formulatedRespond
|
|
|
|
a.memory[:shortterm]["Respond $(a.attempt):"] = respond
|
|
a.memory[:log]["Respond $(a.attempt):"] = respond
|
|
|
|
# evaluate. if score < 8/10 try again.
|
|
guideline = writeEvaluationGuideline(a, a.memory[:shortterm]["user:"])
|
|
|
|
println("")
|
|
@show guideline
|
|
|
|
score = grading(a, guideline, respond)
|
|
@show score
|
|
if score >= 6 # good enough answer
|
|
break
|
|
else # self evaluate and reflect then try again
|
|
analysis = analyze(a)
|
|
println("")
|
|
@show analysis
|
|
|
|
lessonwithcontext = selfReflext(a, analysis)
|
|
|
|
println("")
|
|
@show lessonwithcontext
|
|
|
|
newdict = OrderedDict()
|
|
|
|
a.memory[:shortterm] = keepOnlyKeys(a.memory[:shortterm], ["user:"])
|
|
|
|
headerToDetect = ["Lesson:", "Context:", ]
|
|
headers = detectCharacters(lessonwithcontext, headerToDetect)
|
|
chunkedtext = chunktext(lessonwithcontext, headers)
|
|
|
|
a.memory[:longterm][chunkedtext["Context:"]] = chunkedtext["Lesson:"]
|
|
a.newplan = true
|
|
println("")
|
|
println("RETRY $(a.attempt +1)")
|
|
println("")
|
|
|
|
end
|
|
else
|
|
error("undefied condition, actorstate $actorstate $(@__LINE__)")
|
|
break
|
|
end
|
|
else
|
|
error("attempt limit reach")
|
|
break
|
|
end
|
|
end
|
|
|
|
# good enough answer
|
|
|
|
|
|
# communicates with user
|
|
_ = addNewMessage(a, "assistant", respond)
|
|
return respond
|
|
end
|
|
|
|
|
|
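# Short-term memory layout over one attempt (keys written by work() and actor()):
#
#   "user:"      -> original stimulus
#   "Plan 1:"    -> plan text from planner_mistral_openorca
#   "Thought 1:" / "Act 1:" / "Actinput 1:" / "Obs 1:"  -> one actor step
#   ...
#   "Respond 1:" -> formulated answer, then graded; on a low score only "user:"
#                   is kept and a "Lesson"/"Context" pair is written to :longterm
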
# function evaluate()

# end

"""
|
|
Actor function.
|
|
|
|
Args:
|
|
a, one of ChatAgent's agent.
|
|
plan, a step by step plan to respond
|
|
|
|
Return:
|
|
case 1) if actor complete the plan successfully.
|
|
actorState = "all steps done" inidicates that all step in plan were done.
|
|
msgToUser = nothing.
|
|
case 2) if actor needs to talk to user for more context
|
|
actorState = "chatbox"
|
|
msgToUser = "message from assistant to user"
|
|
|
|
"""
|
|
function actor(a::agentReflex)
|
|
actorState = nothing
|
|
msgToUser = nothing
|
|
|
|
totalsteps = checkTotalStepInPlan(a)
|
|
|
|
while true # Actor loop
|
|
if a.step == 0
|
|
a.step = 1
|
|
else
|
|
decision, reason = goNogo(a)
|
|
println("")
|
|
@show decision
|
|
@show reason
|
|
# a.memory[:shortterm]["Check $(a.step):"] = reason
|
|
if decision == "Yes" # in case there is a cancel, go straight to evaluation
|
|
a.step += 1
|
|
elseif decision == "No"
|
|
# repeat the latest step
|
|
a.memory[:shortterm] = removeHeaders(a.memory[:shortterm], a.step, ["Plan"])
|
|
a.memory[:log] = removeHeaders(a.memory[:log], a.step, ["Plan"])
|
|
println("repeating step $(a.step)")
|
|
elseif decision == "formulateUserRespond"
|
|
actorState = "formulateUserRespond"
|
|
msgToUser = nothing
|
|
break
|
|
else
|
|
error("undefined condition decision = $decision $(@__LINE__)")
|
|
end
|
|
end
|
|
|
|
|
|
@show a.step
|
|
if a.step < totalsteps
|
|
|
|
prompt_actor = actor_mistral_openorca(a)
|
|
|
|
println("")
|
|
@show prompt_actor
|
|
|
|
respond = sendReceivePrompt(a, prompt_actor)
|
|
respond = splittext(respond, ["Obs", "<|im_end|>"])
|
|
|
|
if !occursin("Thought", respond)
|
|
respond = "Thought: " * respond
|
|
end
|
|
|
|
headerToDetect = ["Question:", "Plan:", "Thought:",
|
|
"Act:", "Actinput:", "Obs:", "...",
|
|
"Answer:", "Conclusion:", "Summary:"]
|
|
|
|
# replace headers with headers with correct attempt and step number
|
|
respond = replaceHeaders(respond, headerToDetect, a.step)
|
|
|
|
headers = detectCharacters(respond, headerToDetect)
|
|
|
|
println("")
|
|
respond_actor = respond
|
|
@show respond_actor
|
|
|
|
headerToDetect = ["Plan $(a.attempt):",
|
|
"Thought $(a.step):",
|
|
"Act $(a.step):",
|
|
"Actinput $(a.step):",
|
|
"Obs $(a.step):",
|
|
"Check $(a.step):",]
|
|
headers = detectCharacters(respond, headerToDetect)
|
|
chunkedtext = chunktext(respond, headers)
|
|
|
|
|
|
# add to memory
|
|
a.memory[:shortterm] = addShortMem!(a.memory[:shortterm], chunkedtext)
|
|
a.memory[:log] = addShortMem!(a.memory[:log], chunkedtext)
|
|
|
|
toolname = toolNameBeingCalled(chunkedtext["Act $(a.step):"], a.tools)
|
|
toolinput = chunkedtext["Actinput $(a.step):"]
|
|
@show toolname
|
|
@show toolinput
|
|
|
|
if toolname == "chatbox" # chat with user
|
|
msgToUser = toolinput
|
|
actorState = toolname
|
|
break
|
|
elseif toolname == "formulateUserRespond"
|
|
msgToUser = toolinput
|
|
actorState = toolname
|
|
break
|
|
elseif toolname == "skipstep"
|
|
# skip
|
|
else # function call
|
|
f = a.tools[Symbol(toolname)][:func]
|
|
toolresult = f(a, toolinput)
|
|
@show toolresult
|
|
a.memory[:shortterm]["Obs $(a.step):"] = toolresult
|
|
a.memory[:log]["Obs $(a.step):"] = toolresult
|
|
end
|
|
else
|
|
actorState = "all steps done"
|
|
msgToUser = nothing
|
|
break
|
|
end
|
|
end
|
|
|
|
return actorState, msgToUser
|
|
end
|
|
|
|
|
|
|
|
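# Illustrative sketch (not executed; detectCharacters/chunktext come from ..utils and
# are assumed to split a string at the detected headers, keyed by header). An actor
# respond such as
#
#   "Thought 1: I should search.\nAct 1: wikisearch\nActinput 1: amd gpu latest\n"
#
# would be chunked into
#
#   chunkedtext["Thought 1:"]  == " I should search.\n"
#   chunkedtext["Act 1:"]      == " wikisearch\n"
#   chunkedtext["Actinput 1:"] == " amd gpu latest\n"
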
""" Write evaluation guideline.
|
|
|
|
Args:
|
|
a, one of ChatAgent's agent.
|
|
usermsg, stimulus e.g. question, task and etc.
|
|
|
|
Return:
|
|
An evaluation guideline used to guage AI's work.
|
|
|
|
# Example
|
|
|
|
```jldoctest
|
|
julia> using ChatAgent, CommUtils
|
|
julia> agent = ChatAgent.agentReflex("Jene")
|
|
julia> usermsg = "What's AMD latest product?"
|
|
"
|
|
julia> evaluationGuideLine = writeEvaluationGuideline(agent, usermsg)
|
|
```
|
|
"""
|
|
function writeEvaluationGuideline(a::agentReflex, usermsg::T) where {T<:AbstractString}
|
|
prompt =
|
|
"""
|
|
<|im_start|>system
|
|
You have access to the following tools:
|
|
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
|
|
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
|
|
|
|
Your work:
|
|
$usermsg
|
|
|
|
Your job are:
|
|
1. Write an evaluation guideline for your work in order to be able to evaluate your respond.
|
|
2. An example of what the respond should be.
|
|
<|im_end|>
|
|
"""
|
|
|
|
respond = sendReceivePrompt(a, prompt)
|
|
return respond
|
|
end
|
|
|
|
|
|
|
|
""" Determine a score out of 10 according to evaluation guideline.
|
|
|
|
Args:
|
|
a, one of ChatAgent's agent.
|
|
guidelines, an evaluation guideline.
|
|
shorttermMemory, a short term memory that logs what happened.
|
|
|
|
Return:
|
|
A score out of 10 based on guideline.
|
|
|
|
# Example
|
|
|
|
```jldoctest
|
|
julia> using ChatAgent, CommUtils
|
|
julia> agent = ChatAgent.agentReflex("Jene")
|
|
julia> shorttermMemory = OrderedDict{String, Any}(
|
|
"user" => "What's the latest AMD GPU?",
|
|
"Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
|
|
"Act 1:" => " wikisearch\n",
|
|
"Actinput 1:" => " amd gpu latest\n",
|
|
"Obs 1:" => "No info available for your search query.",
|
|
"Act 2:" => " wikisearch\n",
|
|
"Actinput 2:" => " amd graphics card latest\n",
|
|
"Obs 2:" => "No info available for your search query.")
|
|
julia> guideline = "\nEvaluation Guideline:\n1. Check if the user's question has been understood correctly.\n2. Evaluate the steps taken to provide the information requested by the user.\n3. Assess whether the correct tools were used for the task.\n4. Determine if the user's request was successfully fulfilled.\n5. Identify any potential improvements or alternative approaches that could be used in the future.\n\nThe respond should include:\n1. A clear understanding of the user's question.\n2. The steps taken to provide the information requested by the user.\n3. An evaluation of whether the correct tools were used for the task.\n4. A confirmation or explanation if the user's request was successfully fulfilled.\n5. Any potential improvements or alternative approaches that could be used in the future."
|
|
julia> score = grading(agent, guideline, shorttermMemory)
|
|
2
|
|
```
|
|
"""
|
|
function grading(a, guideline::T, text::T) where {T<:AbstractString}
|
|
prompt =
|
|
"""
|
|
<|im_start|>system
|
|
You have access to the following tools:
|
|
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
|
|
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
|
|
|
|
$guideline
|
|
|
|
Your respond: $text
|
|
|
|
You job are:
|
|
1. Evaluate your respond using the evaluation guideline and an example respond.
|
|
2. Give yourself a score out of 10 for your respond.
|
|
|
|
Use the following format to answer:
|
|
{Evaluation} Score {}/10.
|
|
<|im_end|>
|
|
"""
|
|
println("")
|
|
prompt_grading = prompt
|
|
@show prompt_grading
|
|
|
|
respond = sendReceivePrompt(a, prompt)
|
|
|
|
println("")
|
|
respond_grading = respond
|
|
@show respond_grading
|
|
|
|
_score = split(respond[end-5:end], "/")[1]
|
|
_score = split(_score, " ")[end]
|
|
score = parse(Int, _score)
|
|
return score
|
|
end
|
|
|
|
|
|
|
|
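# Score extraction sketch (not executed): the grader is told to end its output with
# "Score {}/10.", so for a respond ending "... Score 7/10." the slice
# respond[end-5:end] is " 7/10.", split on "/" keeps " 7", and split on " "
# leaves "7" for parse(Int, _). If the LLM omits the trailing format entirely,
# parse throws, which surfaces the formatting failure loudly.
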
""" Analize work.
|
|
|
|
Args:
|
|
a, one of ChatAgent's agent.
|
|
|
|
Return:
|
|
A report of analized work.
|
|
|
|
# Example
|
|
|
|
```jldoctest
|
|
julia> using ChatAgent, CommUtils
|
|
julia> agent = ChatAgent.agentReflex("Jene")
|
|
julia> shorttermMemory = OrderedDict{String, Any}(
|
|
"user:" => "What's the latest AMD GPU?",
|
|
"Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
|
|
"Act 1:" => " wikisearch\n",
|
|
"Actinput 1:" => " amd gpu latest\n",
|
|
"Obs 1:" => "No info available for your search query.",
|
|
"Act 2:" => " wikisearch\n",
|
|
"Actinput 2:" => " amd graphics card latest\n",
|
|
"Obs 2:" => "No info available for your search query.")
|
|
julia> report = analyze(agent, shorttermMemory)
|
|
```
|
|
"""
|
|
function analyze(a)
|
|
shorttermMemory = dictToString(a.memory[:shortterm], ["user:"])
|
|
prompt =
|
|
"""
|
|
<|im_start|>system
|
|
You have access to the following tools:
|
|
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
|
|
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
|
|
|
|
Your work:
|
|
$shorttermMemory
|
|
|
|
You job is to do each of the following steps in detail to analize your work.
|
|
1. What happened?
|
|
2. List all relationships, each with cause and effect.
|
|
3. Look at each relationship, figure out why it behaved that way.
|
|
4. What could you do to improve the respond?
|
|
<|im_end|>
|
|
<|im_start|>assistant
|
|
|
|
"""
|
|
|
|
respond = sendReceivePrompt(a, prompt, max_tokens=1024, timeout=180)
|
|
|
|
return respond
|
|
end
|
|
|
|
|
|
""" Write a lesson drawn from evaluation.
|
|
|
|
Args:
|
|
a, one of ChatAgent's agent.
|
|
report, a report resulted from analyzing shorttermMemory
|
|
|
|
Return:
|
|
A lesson.
|
|
|
|
# Example
|
|
|
|
```jldoctest
|
|
julia> using ChatAgent, CommUtils
|
|
julia> agent = ChatAgent.agentReflex("Jene")
|
|
julia> report =
|
|
"What happened: I tried to search for AMD's latest product using the wikisearch tool,
|
|
but no information was available in the search results.
|
|
Cause and effect relationships:
|
|
1. Searching \"AMD latest product\" -> No info available.
|
|
2. Searching \"most recent product release\" -> No info available.
|
|
3. Searching \"latest product\" -> No info available.
|
|
Analysis of each relationship:
|
|
1. The search for \"AMD latest product\" did not provide any information because the wikisearch tool could not find relevant results for that query.
|
|
2. The search for \"most recent product release\" also did not yield any results, indicating that there might be no recent product releases available or that the information is not accessible through the wikisearch tool.
|
|
3. The search for \"latest product\" similarly resulted in no information being found, suggesting that either the latest product is not listed on the encyclopedia or it is not easily identifiable using the wikisearch tool.
|
|
Improvements: To improve the response, I could try searching for AMD's products on a different
|
|
source or search engine to find the most recent product release. Additionally, I could ask
|
|
the user for more context or clarify their question to better understand what they are
|
|
looking for."
|
|
julia> lesson = selfReflext(agent, report)
|
|
```
|
|
"""
|
|
function selfReflext(a, analysis::T) where {T<:AbstractString}
|
|
prompt =
|
|
"""
|
|
<|im_start|>system
|
|
You have access to the following tools:
|
|
chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
|
|
wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
|
|
|
|
Your report:
|
|
$analysis
|
|
|
|
Your job are:
|
|
1. Lesson: what lesson could you learn from your report?.
|
|
2. Context: what is the context this lesson could apply to?
|
|
<|im_end|>
|
|
"""
|
|
|
|
respond = sendReceivePrompt(a, prompt, max_tokens=2048)
|
|
return respond
|
|
end
|
|
|
|
|
|
""" Formulate a respond from work for user's stimulus.
|
|
|
|
Args:
|
|
a, one of ChatAgent's agent.
|
|
|
|
Return:
|
|
A respond for user's stimulus.
|
|
|
|
# Example
|
|
```jldoctest
|
|
julia> using ChatAgent, CommUtils
|
|
julia> agent = ChatAgent.agentReflex("Jene")
|
|
julia> shorttermMemory = OrderedDict{String, Any}(
|
|
"user:" => "What's the latest AMD GPU?",
|
|
"Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
|
|
"Act 1:" => " wikisearch\n",
|
|
"Actinput 1:" => " amd gpu latest\n",
|
|
"Obs 1:" => "No info available for your search query.",
|
|
"Act 2:" => " wikisearch\n",
|
|
"Actinput 2:" => " amd graphics card latest\n",
|
|
"Obs 2:" => "No info available for your search query.")
|
|
|
|
julia> report = formulateUserRespond(agent, shorttermMemory)
|
|
```
|
|
"""
|
|
function formulateUserRespond(a)
|
|
stimulus = a.memory[:shortterm]["user:"]
|
|
|
|
work = dictToString(a.memory[:shortterm], ["user:"])
|
|
|
|
prompt =
|
|
"""
|
|
<|im_start|>system
|
|
Symbol:
|
|
Stimulus: the input user gives to you and you must respond
|
|
Plan: a plan
|
|
Thought: your thought
|
|
Act: the action you took
|
|
Actinput: the input to the action
|
|
Obs: the result of the action
|
|
|
|
Stimulus:
|
|
$stimulus
|
|
|
|
Your work:
|
|
$work
|
|
|
|
From your work, formulate a respond for user's stimulus.
|
|
<|im_end|>
|
|
Respond:
|
|
"""
|
|
respond = sendReceivePrompt(a, prompt)
|
|
return respond
|
|
end
|
|
|
|
|
|
|
|
""" Determine whether LLM should go to next step.
|
|
|
|
Args:
|
|
a, one of ChatAgent's agent.
|
|
|
|
Return:
|
|
"Yes" or "no" decision to go next step.
|
|
|
|
# Example
|
|
```jldoctest
|
|
julia> using ChatAgent, CommUtils
|
|
julia> agent = ChatAgent.agentReflex("Jene")
|
|
julia> shorttermMemory = OrderedDict{String, Any}(
|
|
"user:" => "What's the latest AMD GPU?",
|
|
"Plan 1:" => " To answer this question, I will need to search for the latest AMD GPU using the wikisearch tool.\n",
|
|
"Act 1:" => " wikisearch\n",
|
|
"Actinput 1:" => " amd gpu latest\n",
|
|
"Obs 1:" => "No info available for your search query.",
|
|
"Act 2:" => " wikisearch\n",
|
|
"Actinput 2:" => " amd graphics card latest\n",
|
|
"Obs 2:" => "No info available for your search query.")
|
|
|
|
julia> decision = goNogo(agent)
|
|
"Yes"
|
|
```
|
|
"""
|
|
function goNogo(a)
|
|
stimulus = a.memory[:shortterm]["user:"]
|
|
work = dictToString(a.memory[:shortterm], ["user:"])
|
|
|
|
# prompt =
|
|
# """
|
|
# <|im_start|>system
|
|
# Symbol meaning:
|
|
# Stimulus: the input user gives to you and you must respond
|
|
# Plan: a plan
|
|
# Thought: your thought
|
|
# Act: the action you took
|
|
# Actinput: the input to the action
|
|
# Obs: the result of the action
|
|
|
|
# Stimulus:
|
|
# $stimulus
|
|
|
|
# Your work:
|
|
# $work
|
|
|
|
# From your work, you job is to decide what to do next by choosing one of the following choices:
|
|
# If you are ready to do the next step of the plan say, "{Yes}". And what is the rationale behind the decision?
|
|
# If you need to repeat the latest step say, "{No}". And what is the rationale behind the decision?
|
|
# If you are ready to formulate a final respond to user original stimulus say, {formulateUserRespond}. And what is the rationale behind the decision?
|
|
# <|im_end|>
|
|
# """
|
|
|
|
prompt =
|
|
"""
|
|
<|im_start|>system
|
|
Symbol meaning:
|
|
Stimulus: the input user gives to you and you must respond
|
|
Plan: a plan
|
|
Thought: your thought
|
|
Act: the action you took
|
|
Actinput: the input to the action
|
|
Obs: the result of the action
|
|
|
|
Stimulus:
|
|
$stimulus
|
|
|
|
Your work:
|
|
$work
|
|
|
|
Your job is to check whether step $(a.step) of your work is completed according to the plan and choose only one of the following choices.
|
|
choice 1: If you are ready to do the next step of the plan say, "{Yes}". And what is the rationale behind the decision?
|
|
choice 2: If you need to repeat the latest step say, "{No}". And what is the rationale behind the decision?
|
|
<|im_end|>
|
|
<|im_start|>assistant
|
|
|
|
"""
|
|
|
|
respond = sendReceivePrompt(a, prompt)
|
|
|
|
|
|
|
|
decision = nothing
|
|
reason = nothing
|
|
if occursin("Yes", respond)
|
|
decision = "Yes"
|
|
elseif occursin("No", respond)
|
|
decision = "No"
|
|
elseif occursin("formulateUserRespond", respond)
|
|
decision = "formulateUserRespond"
|
|
else
|
|
error("undefied condition, decision $decision $(@__LINE__)")
|
|
end
|
|
|
|
startInd = findfirst(decision, respond)[end] +2
|
|
|
|
if occursin(":", respond[startInd:end]) # check for ":" after decision cha
|
|
startInd2 = findnext(":", respond, startInd)[end]+1
|
|
reason = respond[startInd2:end]
|
|
else
|
|
reason = respond[startInd:end]
|
|
end
|
|
|
|
return decision, reason
|
|
end
|
|
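# Decision parsing sketch (not executed): for a respond of "Yes: step 1 is done.",
# findfirst("Yes", respond)[end] + 2 points just past "Yes:", no further ":" follows,
# and goNogo returns ("Yes", " step 1 is done."). For a reply like
# "{Yes}. Rationale: the search succeeded.", the ":" branch kicks in and the
# reason becomes everything after "Rationale:".
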
end # module |