# ---- scraper metadata (raw page text preceding the module; commented out so the file parses) ----
# 755 lines · 20 KiB · Julia · Executable File
module interface
|
|
|
|
|
|
export agentReact, agentReflex,
|
|
addNewMessage, clearMessage, removeLatestMsg, conversation
|
|
|
|
using JSON3, DataStructures, Dates, UUIDs, HTTP
|
|
using CommUtils, GeneralUtils
|
|
using ..type, ..utils
|
|
|
|
# ---------------------------------------------------------------------------- #
|
|
# pythoncall setting #
|
|
# ---------------------------------------------------------------------------- #
|
|
# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252
|
|
# by setting the following variables, PythonCall will use system python or conda python and
|
|
# packages installed by system or conda
|
|
# if these setting are not set (comment out), PythonCall will use its own python and package that
|
|
# installed by CondaPkg (from env_preparation.jl)
|
|
# ENV["JULIA_CONDAPKG_BACKEND"] = "Null"
|
|
# systemPython = split(read(`which python`, String), "\n")[1]
|
|
# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with $> which python ex. raw"/root/conda/bin/python"
|
|
|
|
# using PythonCall
|
|
# const py_agents = PythonCall.pynew()
|
|
# const py_llms = PythonCall.pynew()
|
|
# function __init__()
|
|
# # PythonCall.pycopy!(py_cv2, pyimport("cv2"))
|
|
|
|
# # equivalent to from urllib.request import urlopen in python
|
|
# PythonCall.pycopy!(py_agents, pyimport("langchain.agents"))
|
|
# PythonCall.pycopy!(py_llms, pyimport("langchain.llms"))
|
|
# end
|
|
|
|
#------------------------------------------------------------------------------------------------100
|
|
|
|
|
|
"""
|
|
add new message to agent
|
|
# Example
|
|
```jldoctest
|
|
julia> addNewMessage(agent1, "user", "Where should I go to buy snacks")
|
|
```
|
|
"""
|
|
function addNewMessage(a::T1, role::String, content::T2) where {T1<:agent, T2<:AbstractString}
    # Reject roles outside the agent's whitelist (guards against typos).
    role in a.availableRole || error("role is not in agent.availableRole")

    # How many user-authored messages the history currently holds.
    usercount = count(msg -> msg[:role] == "user", a.messages)

    remaining = 0
    if usercount > a.maxUserMsg # limit exceeded: delete all conversation
        # NOTE(review): in this branch the incoming message is dropped, not
        # stored — confirm this reset-on-overflow behavior is intended.
        clearMessage(a)
        remaining = a.maxUserMsg
    else
        # The counter is bumped for ANY role here (as in the original), so an
        # assistant message also reduces the reported remaining budget.
        usercount += 1
        entry = Dict(:role => role, :content => content, :timestamp => Dates.now())
        push!(a.messages, entry)
        remaining = a.maxUserMsg - usercount
    end

    return remaining
end
|
|
|
|
"""
    clearMessage(a::agent)

Delete every stored message except the first entry (the system
instruction) and reset the agent's thought sentinel to the string
`"nothing"`.
"""
function clearMessage(a::T) where {T<:agent}
    # The original popped from `a.messages` while iterating
    # `eachindex(a.messages)` over the same collection; a while-loop states
    # the termination condition directly and is safe for any container that
    # supports `length`/`pop!`.
    while length(a.messages) > 1 # system instruction will NOT be deleted
        pop!(a.messages)
    end
    a.thought = "nothing" # string sentinel: no active chain of thought
end
|
|
|
|
"""
    removeLatestMsg(a::agent)

Remove the most recent message from the agent's history. The first entry
is never removed. Returns the removed message, or `nothing` when only
one entry remains.
"""
function removeLatestMsg(a::T) where {T<:agent}
    # Keep at least the first (system) entry in place.
    length(a.messages) > 1 || return nothing
    return pop!(a.messages)
end
|
|
|
|
# function generatePrompt_mistral_openorca(a::T, usermsg::String, role::Symbol) where {T<:agent}
|
|
# prompt =
|
|
# """
|
|
# <|im_start|>system
|
|
# {systemMsg}
|
|
# <|im_end|>
|
|
# Here are the context for the question:
|
|
# {context}
|
|
# """
|
|
# prompt = replace(prompt, "{systemMsg}" => a.roles[role])
|
|
|
|
# toolnames = ""
|
|
# toollines = ""
|
|
# for (toolname, v) in a.tools
|
|
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
|
|
# toollines *= toolline
|
|
# toolnames *= "$toolname,"
|
|
# end
|
|
# prompt = replace(prompt, "{toolnames}" => toolnames)
|
|
# prompt = replace(prompt, "{tools}" => toollines)
|
|
|
|
# prompt = replace(prompt, "{context}" => a.context)
|
|
|
|
# prompt *= "<|im_start|>user\n" * usermsg * "\n<|im_end|>\n"
|
|
# prompt *= "<|im_start|>assistant\n"
|
|
|
|
# return prompt
|
|
# end
|
|
|
|
# function generatePrompt_mistral_openorca(a::T, usermsg::String,
|
|
# thinkingMode::Symbol=:nothinking) where {T<:agent}
|
|
|
|
# prompt =
|
|
# """
|
|
# <|im_start|>system
|
|
# {systemMsg}
|
|
# You have access to the following tools:
|
|
# {tools}
|
|
# {thinkingMode}
|
|
# <|im_end|>
|
|
# Here are the context for the question:
|
|
# {context}
|
|
# """
|
|
# prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
|
|
# prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
|
|
# toolnames = ""
|
|
# toollines = ""
|
|
# for (toolname, v) in a.tools
|
|
# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
|
|
# toollines *= toolline
|
|
# toolnames *= "$toolname,"
|
|
# end
|
|
# prompt = replace(prompt, "{toolnames}" => toolnames)
|
|
# prompt = replace(prompt, "{tools}" => toollines)
|
|
|
|
# prompt = replace(prompt, "{context}" => a.context)
|
|
|
|
# prompt *= "<|im_start|>user\nQuestion: " * usermsg * "\n<|im_end|>\n"
|
|
# prompt *= "<|im_start|>assistant\n"
|
|
|
|
# return prompt
|
|
# end
|
|
|
|
|
|
"""
    generatePrompt_mistral_openorca(a::agent, usermsg::String, thinkingMode::Symbol=:nothinking)

Build a mistral-openorca chat-markup prompt containing the agent's role
text, its rendered tool list, the requested thinking-mode scaffold and
the current context, terminated by the user's stimulus. Returns the
prompt string; does not call the LLM and does not mutate `a`.
"""
function generatePrompt_mistral_openorca(a::T, usermsg::String,
                                         thinkingMode::Symbol=:nothinking) where {T<:agent}

    prompt =
        """
        <|im_start|>system
        {systemMsg}
        {tools}
        {thinkingMode}
        <|im_end|>
        Here are the context for the stimulus:
        {context}
        """
    prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
    prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])

    # Render one "name: description input output" line per tool.
    toolnames = ""
    toollines = ""
    for (toolname, v) in a.tools
        toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
        toollines *= toolline
        toolnames *= "$toolname,"
    end
    prompt = replace(prompt, "{toolnames}" => toolnames) # no-op: template has no {toolnames} slot
    # BUG FIX: the {tools} placeholder was never substituted, so the literal
    # text "{tools}" leaked into every generated prompt. The earlier
    # (commented-out) versions of this function performed this replacement.
    prompt = replace(prompt, "{tools}" => toollines)

    prompt = replace(prompt, "{context}" => a.context)

    prompt *= "<|im_start|>user\nStimulus: " * usermsg * "\n<|im_end|>\n"
    prompt *= "<|im_start|>assistant\n"

    return prompt
end
|
|
|
|
"""
    chat_mistral_openorca(a::agentReflex, usermsg::String)

Build a plain-chat prompt (no tool list, no thinking scaffold) in the
mistral-openorca `<|im_start|>…<|im_end|>` chat-markup format for a
reflex agent. Returns the substituted prompt string; does not call the
LLM and does not mutate `a`.
"""
function chat_mistral_openorca(a::agentReflex, usermsg::String)
    # NOTE: this literal is a dead expression kept as in-body documentation
    # of the general prompt layout; it is evaluated and discarded at runtime.
    """
    general prompt format:

    "
    <|im_start|>system
    {role}
    {tools}
    {thinkingFormat}
    <|im_end|>
    {context}
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant

    "

    Note:
    {context} =
    "
    {earlierConversation}
    {env state}
    {shortterm memory}
    {longterm memory}
    "
    """

    # Actual template used here: no {tools} section (plain chat, no tool use).
    prompt =
        """
        <|im_start|>system
        {role}
        {thinkingFormat}
        <|im_end|>
        {context}
        <|im_start|>user
        {usermsg}
        <|im_end|>
        <|im_start|>assistant

        """
    prompt = replace(prompt, "{role}" => a.roles[a.role]) # persona text for the current role
    prompt = replace(prompt, "{thinkingFormat}" => "")    # chat mode: no thinking scaffold

    # Context block: only the summarized earlier conversation is filled in;
    # env-state and long-term-memory slots are currently blanked out.
    context =
        """
        {earlierConversation}
        {env state}
        {longterm memory}
        """
    context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
    context = replace(context, "{env state}" => "")
    context = replace(context, "{longterm memory}" => "")

    prompt = replace(prompt, "{context}" => context)

    prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg")

    return prompt
end
|
|
|
|
|
|
"""
    planner_mistral_openorca(a::agentReflex, usermsg::String)

Build the planner prompt for a reflex agent in mistral-openorca
chat-markup: the agent's role, its rendered tool list, the `:planner`
thinking scaffold and the conversation context, terminated by the user's
stimulus. Returns the prompt string; does not call the LLM and does not
mutate `a`.
"""
function planner_mistral_openorca(a::agentReflex, usermsg::String)
    # NOTE: dead string literal kept as in-body documentation of the general
    # prompt layout; it is evaluated and discarded at runtime.
    """
    general prompt format:

    "
    <|im_start|>system
    {role}
    {tools}
    {thinkingFormat}
    <|im_end|>
    {context}
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant

    "

    Note:
    {context} =
    "
    {earlierConversation}
    {env state}
    {shortterm memory}
    {longterm memory}
    "
    """

    prompt =
        """
        <|im_start|>system
        {role}
        {tools}
        {thinkingFormat}
        <|im_end|>
        {context}
        <|im_start|>user
        {usermsg}
        <|im_end|>
        <|im_start|>assistant

        """
    prompt = replace(prompt, "{role}" => a.roles[a.role])             # persona text
    prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner]) # planner scaffold

    # Render one "name: description input output" line per tool.
    toolnames = ""
    toollines = ""
    for (toolname, v) in a.tools
        toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
        toollines *= toolline
        toolnames *= "$toolname,"
    end

    prompt = replace(prompt, "{toolnames}" => toolnames) # no-op: template has no {toolnames} slot
    prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")

    # Context block: only the summarized earlier conversation is filled in;
    # env-state and long-term-memory slots are currently blanked out.
    context =
        """
        {earlierConversation}
        {env state}
        {longterm memory}
        """
    context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
    context = replace(context, "{env state}" => "")
    context = replace(context, "{longterm memory}" => "")

    prompt = replace(prompt, "{context}" => context)

    prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg")

    return prompt
end
|
|
|
|
"""
    actor_mistral_openorca(a::agentReflex, usermsg::AbstractString)

Build the actor prompt for a reflex agent in mistral-openorca
chat-markup: the agent's role, its rendered tool list and the `:actor`
thinking scaffold, terminated by the stimulus (typically one plan step).
Unlike the planner prompt, the earlier conversation is NOT included in
the context. Returns the prompt string; does not call the LLM and does
not mutate `a`.
"""
function actor_mistral_openorca(a::agentReflex, usermsg::T) where {T<:AbstractString}
    # NOTE: dead string literal kept as in-body documentation of the general
    # prompt layout; it is evaluated and discarded at runtime.
    """
    general prompt format:

    "
    <|im_start|>system
    {role}
    {tools}
    {thinkingFormat}
    <|im_end|>
    {context}
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant

    "

    Note:
    {context} =
    "
    {earlierConversation}
    {env state}
    {shortterm memory}
    {longterm memory}
    "
    """

    prompt =
        """
        <|im_start|>system
        {role}
        {tools}
        {thinkingFormat}
        <|im_end|>
        {context}
        <|im_start|>user
        {usermsg}
        <|im_end|>
        <|im_start|>assistant

        """

    prompt = replace(prompt, "{role}" => a.roles[a.role])           # persona text
    prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:actor]) # actor scaffold

    # Render one "name: description input output" line per tool.
    # Note: separator here is "comma space", unlike the planner's bare comma.
    toolnames = ""
    toollines = ""
    for (toolname, v) in a.tools
        toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
        toollines *= toolline
        toolnames *= "$toolname, "
    end
    prompt = replace(prompt, "{toolnames}" => toolnames) # no-op: template has no {toolnames} slot
    prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")

    # Context block: earlier conversation intentionally omitted for the actor;
    # env-state and long-term-memory slots are currently blanked out.
    context =
        """
        {env state}
        {longterm memory}
        """
    # context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
    context = replace(context, "{env state}" => "")
    context = replace(context, "{longterm memory}" => "")

    prompt = replace(prompt, "{context}" => context)

    prompt = replace(prompt, "{usermsg}" => "Stimulus: $usermsg")

    return prompt
end
|
|
|
|
"""
|
|
Chat with llm.
|
|
|
|
```jldoctest
|
|
julia> using JSON3, UUIDs, Dates, FileIO, CommUtils, ChatAgent
|
|
julia> mqttClientSpec = (
|
|
clientName= "someclient", # name of this client
|
|
clientID= "$(uuid4())",
|
|
broker= "mqtt.yiem.ai",
|
|
pubtopic= (imgAI="img/api/v0.0.1/gpu/request",
|
|
txtAI="txt/api/v0.1.0/gpu/request"),
|
|
subtopic= (imgAI="agent/api/v0.1.0/img/respond",
|
|
txtAI="agent/api/v0.1.0/txt/respond"),
|
|
keepalive= 30,
|
|
)
|
|
julia> msgMeta = Dict(
|
|
:msgPurpose=> "updateStatus",
|
|
:from=> "agent",
|
|
:to=> "llmAI",
|
|
:requestrespond=> "request",
|
|
:sendto=> "", # destination topic
|
|
:replyTo=> "agent/api/v0.1.0/txt/respond", # requester ask responder to send reply to this topic
|
|
:repondToMsgId=> "", # responder is responding to this msg id
|
|
:taskstatus=> "", # "complete", "fail", "waiting" or other status
|
|
:timestamp=> Dates.now(),
|
|
:msgId=> "$(uuid4())",
|
|
)
|
|
julia> newAgent = ChatAgent.agentReact(
|
|
"Jene",
|
|
mqttClientSpec,
|
|
role=:assistant_react,
|
|
msgMeta=msgMeta
|
|
)
|
|
julia> respond = ChatAgent.conversation(newAgent, "Hi! how are you?")
|
|
```
|
|
"""
|
|
function conversation(a::T, usermsg::String) where {T<:agent}
    # One chat turn for a ReAct-style agent: either continue an in-flight
    # chain of thought, or start a fresh one in the mode chosen by
    # chooseThinkingMode. Returns the assistant's reply string.
    respond = nothing

    if a.thought != "nothing" # continue thought ("nothing" is a string sentinel)
        _ = addNewMessage(a, "user", usermsg)
        # The user's reply becomes the observation for the current round.
        a.thought *= "Obs $(a.thinkinground): $usermsg\n"
        prompt = a.thought
        respond = work(a, prompt)
    else # new thought
        thinkingmode = chooseThinkingMode(a, usermsg)
        @show thinkingmode
        if thinkingmode == :no_thinking
            # Plain chat: single LLM round-trip, no tool use.
            a.context = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
            _ = addNewMessage(a, "user", usermsg)
            prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
            @show prompt
            respond = sendReceivePrompt(a, prompt)
            # Keep only the text before the end-of-message token; strip newlines.
            respond = split(respond, "<|im_end|>")[1]
            respond = replace(respond, "\n" => "")
            _ = addNewMessage(a, "assistant", respond)
            @show respond
        elseif thinkingmode == :thinking
            # Deliberate mode: hand the generated prompt to the think/act loop.
            a.context = conversationSummary(a)
            _ = addNewMessage(a, "user", usermsg)
            prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
            respond = work(a, prompt)
        else
            error("undefined condition thinkingmode = $thinkingmode")
        end
    end

    return respond
end
|
|
|
|
"""
|
|
Continuously run llm functions except when llm is getting Answer: or chatbox.
|
|
There are many work() depend on thinking mode.
|
|
"""
|
|
"""
    work(a::agent, prompt::String, maxround::Int=3)

ReAct-style think/act loop: repeatedly query the LLM, parse the
response into numbered sections (Thought/Act/ActInput/Answer/…), run
any requested tool and feed the observation back, until the model emits
a final `Answer:` (or uses the `chatbox` tool to reply directly).
Mutates `a.thought`, `a.context`, `a.thinkinground` and the message
history. Returns the final reply string.

NOTE(review): `maxround` is currently unused — the round cap comes from
`a.thinkingroundlimit`; confirm whether the parameter should be wired in.
"""
function work(a::T, prompt::String, maxround::Int=3) where {T<:agent}
    respond = nothing
    while true
        a.thinkinground += 1
        @show a.thinkinground
        toolname = nothing
        toolinput = nothing

        # Force a wrap-up once the round limit is exceeded: nudge the model
        # toward producing a final Answer instead of another Act.
        if a.thinkinground > a.thinkingroundlimit
            a.thought *= "Thought $(a.thinkinground): I think I know the answer."
            prompt = a.thought
        end

        @show prompt
        respond = sendReceivePrompt(a, prompt)

        # Truncate at any model-hallucinated observation and pick the header
        # set for this round (headers carry the round number from round 2 on,
        # so the LLM does not confuse rounds).
        headerToDetect = nothing
        if a.thinkinground == 1
            try
                respond = split(respond, "Obs:")[1]
                headerToDetect = ["Question:", "Plan:", "Thought:", "Act:", "ActInput:", "Obs:", "...", "Answer:",
                                  "Conclusion:", "Summary:"]
            catch
            end
        else
            try
                respond = split(respond, "Obs $(a.thinkinground):")[1]
                headerToDetect = ["Question $(a.thinkinground):", "Plan $(a.thinkinground):",
                                  "Thought $(a.thinkinground):", "Act $(a.thinkinground):",
                                  "ActInput $(a.thinkinground):", "Obs $(a.thinkinground):",
                                  "...", "Answer:",
                                  "Conclusion:", "Summary:"]
            catch
            end
        end
        @show respond
        headers = detectCharacters(respond, headerToDetect)
        chunkedtext = chunktext(respond, headers)

        Answer = findDetectedCharacter(headers, "Answer:")
        AnswerInd = length(Answer) != 0 ? Answer[1] : nothing
        Act = findDetectedCharacter(headers, "Act $(a.thinkinground):")
        if length(Answer) == 1 && length(Act) == 0
            # Final answer with no pending action: reset the thought state and
            # return the answer body.
            a.thought = "nothing" # assignment finished, no more thought
            a.context = "nothing"
            a.thinkinground = 0
            respond = chunkedtext[AnswerInd][:body]
            respond = replace(respond, "<|im_end|>"=>"")
            _ = addNewMessage(a, "assistant", respond)
            break
        else

            # check for tool being called
            ActHeader = a.thinkinground == 1 ? "Act:" : "Act $(a.thinkinground):"
            if length(findDetectedCharacter(headers, ActHeader)) != 0 # check whether there is Act: in a respond
                ActInd = findDetectedCharacter(headers, ActHeader)[1]
                toolname = toolNameBeingCalled(chunkedtext[ActInd][:body], a.tools)
            end
            ActInputHeader = a.thinkinground == 1 ? "ActInput:" : "ActInput $(a.thinkinground):"
            if length(findDetectedCharacter(headers, ActInputHeader)) != 0 # check whether there is ActInput: in a respond
                ActInputInd = findDetectedCharacter(headers, ActInputHeader)[1]
                toolinput = chunkedtext[ActInputInd][:body]
            end

            # BUG FIX: guard BEFORE the clean-up below. The original called
            # occursin(" \"", toolinput) while toolinput could still be
            # `nothing` (when the ActInput header was absent), raising a
            # MethodError instead of retrying the round.
            if toolname === nothing || toolinput === nothing
                println("toolname $toolname toolinput $toolinput retry thinking")
                a.thinkinground -= 1
                continue
            end

            # clean up: extract the bare argument (quoted or bare-word form)
            if occursin(" \"", toolinput)
                toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " \"", "\"\n")
            else
                toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " ", "\n")
            end
            @show toolname
            @show toolinput
            # Clean-up itself may fail and yield `nothing`; retry then too.
            if toolname === nothing || toolinput === nothing
                println("toolname $toolname toolinput $toolinput retry thinking")
                a.thinkinground -= 1
                continue
            end

            # First round: renumber headers before persisting into a.thought;
            # later rounds: append the raw response.
            if a.thought == "nothing"
                thought = ""
                for i in chunkedtext
                    header = i[:header]
                    header = replace(header, ":"=>" $(a.thinkinground):") # add number so that llm not confused
                    body = i[:body]
                    thought *= "$header $body"
                end
                a.thought = prompt * thought
            else
                a.thought *= respond
            end


            if toolname == "chatbox" # chat with user
                a.thought *= toolinput
                respond = toolinput
                _ = addNewMessage(a, "assistant", respond)
                break
            else # function call
                f = a.tools[Symbol(toolname)][:func]
                _result = f(toolinput)
                if _result != "No info available." #TODO for use with wikisearch(). Not good for other tools
                    _result = makeSummary(a, _result)
                end
                result = "Obs $(a.thinkinground): $_result\n"
                a.thought *= result
                prompt = a.thought
            end
        end
    end
    @show respond
    return respond
end
|
|
|
|
"""
    conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)

Top-level turn handler for a reflex agent: pick a thinking mode for the
stimulus, then either answer with a single plain-chat LLM round-trip or
delegate to the plan/act work loop. Returns the reply string.
"""
function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)
    a.attemptlimit = attemptlimit

    # Decide how much deliberation this stimulus needs.
    a.thinkingmode = chooseThinkingMode(a, usermsg)
    @show a.thinkingmode

    # Anything beyond plain chat goes through the plan/act work loop.
    a.thinkingmode == :no_thinking || return work(a, usermsg)

    # Plain chat path: summarize history, record the stimulus, one LLM call.
    a.earlierConversation = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
    _ = addNewMessage(a, "user", usermsg)
    prompt = chat_mistral_openorca(a, usermsg) #TODO rewrite this function
    @show prompt
    respond = sendReceivePrompt(a, prompt)
    # Keep only the text before the end-of-message token; strip newlines.
    respond = replace(split(respond, "<|im_end|>")[1], "\n" => "")
    _ = addNewMessage(a, "assistant", respond)
    @show respond

    return respond
end
|
|
|
|
|
|
"""
    work(a::agentReflex, usermsg::String)

Plan/act loop for the reflex agent (work in progress): log the stimulus
into short-term memory, ask the planner for a plan, hand the plan to
`actor`, then (TODO) evaluate the outcome. The loop currently has no
break condition of its own — it exits only via `error` or once the
actor-state handling marked WORKING is implemented.
"""
function work(a::agentReflex, usermsg::String)
    if a.thinkingmode == :new_thinking
        a.earlierConversation = conversationSummary(a)
        _ = addNewMessage(a, "user", usermsg)
    elseif a.thinkingmode == :continue_thinking
        error("continue_thinking") # WIP guard: branch not implemented yet
        _ = addNewMessage(a, "user", usermsg)     # unreachable until the guard is removed
        a.thought *= "Obs $(a.attempt): $usermsg\n"
    else
        # BUG FIX: the original interpolated `$thinkingmode`, an undefined
        # local in this method (raising UndefVarError instead of the intended
        # message); the mode lives on the agent.
        error("undefined condition thinkingmode = $(a.thinkingmode)")
    end

    while true # Work loop
        # plan
        a.attempt += 1
        if a.attempt <= a.attemptlimit
            # TODO: normal planning path (placeholder in original)
        else # attempt limit reached
            # TODO: wrap-up path (placeholder in original)
        end
        @show a.attempt
        @show usermsg
        # Log the stimulus into short-term memory in chat-markup form.
        logmsg = "<|im_start|>user:\nStimulus: $usermsg\n<|im_end|>\n"
        a.memory[:shortterm] *= logmsg

        toolname = nothing
        toolinput = nothing
        prompt = planner_mistral_openorca(a, usermsg)
        @show prompt
        respond = sendReceivePrompt(a, prompt)
        # Keep the text before the end token and drop any trailing Response:.
        plan = split(respond, "<|im_end|>")[1]
        plan = split(plan, "Response:")[1]
        # Number the plan before logging so attempts stay distinguishable.
        _plan = replace(plan, "Plan:"=>"Plan $(a.attempt):")
        logmsg = "<|im_start|>assistant:\n$_plan\n"
        a.memory[:shortterm] *= logmsg
        actorstate, result = actor(a, plan)
        #WORKING if actorstate == "chatbox" break work loop and get back to user

        # evaluate

    end
end
|
|
|
|
"""
|
|
Actor function.
|
|
"""
|
|
"""
    actor(a::agentReflex, plan) -> (actorState, result)

Execute a numbered plan step by step (work in progress): for each step,
build an actor prompt, query the LLM, log the response into short-term
memory, and run the requested tool (or reply via `chatbox`).

NOTE(review): the `error("actor done …")` calls look like temporary
debug traps — every exit path currently throws before `break`/`return`
can run; confirm before relying on the return value.
"""
function actor(a::agentReflex, plan::T) where {T<:AbstractString}
    actorState = nothing
    @show plan
    # How many numbered steps the plan contains.
    totalsteps = checkTotalStepInPlan(a, plan)

    result = nothing
    a.step = 0
    while true # Actor loop
        a.step += 1
        @show a.step
        if a.step <= totalsteps
            # Extract the current step and ask the LLM to act on it.
            stepdetail = extractStepFromPlan(a, plan, a.step)
            prompt = actor_mistral_openorca(a, stepdetail)
            @show prompt
            respond = sendReceivePrompt(a, prompt)
            respond = split(respond, "<|im_end|>")[1]
            @show respond
            headerToDetect = ["Question:", "Plan:", "Thought:", "Act:", "ActInput:", "Obs:", "...", "Answer:",
                              "Conclusion:", "Summary:"]
            headers = detectCharacters(respond, headerToDetect)

            # add to memory (step-numbered so attempts stay distinguishable)
            _respond = addStepNumber(respond, headers, a.step)
            a.memory[:shortterm] *= _respond

            chunkedtext = chunktext(respond, headers)
            # NOTE(review): chunkedtext is indexed by header STRING here,
            # while work(::agent) indexes it by integer with [:body] — confirm
            # the chunktext method used here returns a header-keyed container.
            toolname = toolNameBeingCalled(chunkedtext["Act:"], a.tools)
            toolinput = chunkedtext["ActInput:"]
            @show toolname
            @show toolinput

            #WORKING
            if toolname == "chatbox" # chat with user
                # a.memory[:shortterm] *= toolinput
                respond = toolinput
                _ = addNewMessage(a, "assistant", respond)
                result = respond
                actorState = toolname
                error("actor done 0") # debug trap: aborts before `break`
                break
            else # function call
                f = a.tools[Symbol(toolname)][:func]
                result = f(toolinput)
                # Feed the observation back into short-term memory.
                result = "\nObs $(a.step): $result\n"
                a.memory[:shortterm] *= result
            end
        else #TODO finish all steps

            actorState = "all steps done"
            result = "all steps done"
            error("actor done 2") # debug trap: aborts before `break`
            break
        end

    end
    error("actor done 3") # debug trap: return below unreachable while present
    return actorState, result
end
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
end # module |