adding React agent

This commit is contained in:
2023-11-14 11:58:49 +00:00
parent 0db827839b
commit f39f76e4e1

View File

@@ -1,8 +1,8 @@
module interface
export agent, addNewMessage, clearMessage, removeLatestMsg, generatePrompt_tokenPrefix,
generatePrompt_tokenSuffix, conversation
export agentReact, addNewMessage, clearMessage, removeLatestMsg, generatePrompt_tokenPrefix,
generatePrompt_tokenSuffix, conversation, work
using JSON3, DataStructures, Dates, UUIDs
using CommUtils, GeneralUtils
@@ -32,33 +32,101 @@ using CommUtils, GeneralUtils
#------------------------------------------------------------------------------------------------100
@kwdef mutable struct agent
availableRole::AbstractVector = ["system", "user", "assistant"]
agentName::String = "assistant"
maxUserMsg::Int = 10
earlierConversation::String = "" # summary of earlier conversation
mqttClient::Union{mqttClient, Nothing} = nothing
msgMeta::Union{Dict, Nothing} = nothing
# Root of the agent type hierarchy. Concrete agents (e.g. `agentReact` below)
# subtype this so shared methods can dispatch via `a::T where {T<:agent}`.
abstract type agent end
""" Dict(Role=> Content) ; Role can be system, user, assistant
Example:
messages=[
Dict(:role=>"system", :content=> "You are a helpful assistant."),
Dict(:role=>"assistant", :content=> "How may I help you"),
Dict(:role=>"user", :content=> "Hello, how are you"),
]
"""
# Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
# messages= [Dict(:role=>"system", :content=> "", :timestamp=> Dates.now()),]
messages = []
thougt::String = "" # internal thinking area
info::String = "" # additional info
"""
    agentReact <: agent

Mutable state for a ReAct-style chat agent: its role prompts, tool registry,
message history, and the MQTT client used to reach the LLM service.
All fields have defaults via `@kwdef`; see the `agentReact(...)` constructor
below for the usual way to build one.
"""
@kwdef mutable struct agentReact <: agent
    availableRole::AbstractVector = ["system", "user", "assistant"] # roles accepted when adding messages
    agentName::String = "assistant"
    maxUserMsg::Int = 10 # cap on stored user messages (enforced by message-handling functions)
    earlierConversation::String = "" # summary of earlier conversation
    # NOTE(review): field name shadows the `mqttClient` type from CommUtils — the
    # constructor has to spell it `CommUtils.mqttClient(...)`; consider renaming.
    mqttClient::Union{mqttClient, Nothing} = nothing
    msgMeta::Union{Dict, Nothing} = nothing # metadata attached to every outgoing MQTT message
    """ Dict(Role=> Content) ; Role can be system, user, assistant
    Example:
    messages=[
    Dict(:role=>"system", :content=> "You are a helpful assistant."),
    Dict(:role=>"assistant", :content=> "How may I help you"),
    Dict(:role=>"user", :content=> "Hello, how are you"),
    ]
    """
    role::Symbol = :assistant # key into `roles` selecting the active system prompt
    roles::Dict = Dict(:assistant => "You are a helpful assistant.",)
    # Ref: Chat prompt format https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
    # messages= [Dict(:role=>"system", :content=> "", :timestamp=> Dates.now()),]
    # NOTE(review): field is untyped (so `Any`); the default suggests
    # Vector{Dict{String, Any}}, but the constructor pushes Dicts with Symbol
    # keys — verify the intended element type.
    messages = Vector{Dict{String, Any}}()
    context::String = "nothing" # internal thinking area; the literal "nothing" is a string sentinel
    tools::Union{Dict, Nothing} = nothing # toolname::Symbol => Dict(:name, :description, :input, :output[, :func])
    thought::String = "nothing" # contain unfinished thoughts; "nothing" sentinel = no pending thought
end
function agent(
function agentReact(
agentName::String,
mqttClientSpec::NamedTuple;
systemMessage::String="You are a helpful assistant.", # system message of an agent
role::Symbol=:assistant,
roles::Dict=Dict(
:assistant_react =>
"""
You are a helpful assistant. You don't know other people personal info previously.
Use the following format:
QTS: the input question your user is asking and you must answer
Plan: first you should always think about the question and the info you have thoroughly then extract and devise a complete plan to find the answer (pay attention to variables and their corresponding numerals).
Thought: you should always think about the info you need and what to do (pay attention to correct numeral calculation and commonsense).
Act: the action tool related to what you intend to do, should be one of [Chatbox, Internet, WineStock]
ActInput: the input to the action (pay attention to the tool's input)
Obs: the result of the action
... (this Plan/Thought/Act/ActInput/Obs loop can repeat N times.)
Thought: I think I know the answer
ANS: Answer of the original question and the rationale behind your answer
""",
:sommelier =>
"""
You are a sommelier at an online wine reseller who always ask user for wine relevant info before you could help them choosing wine.
You usually recommend atmost 2 wines for customers.
You don't know other people personal info previously.
Use the following format:
QTS: the input question your user is asking and you must answer
Plan: first you should always think about the question and the info you have thoroughly then extract and devise a complete plan to find the answer (pay attention to variables and their corresponding numerals).
Thought: ask yourself do you have all the info you need? And what to do (pay attention to correct numeral calculation and commonsense).
Act: the tool that match your thought, should be one of [Chatbox, Internet, WineStock]
ActInput: the input to the action (pay attention to the tool's input)
Obs: the result of the action
... (this Plan/Thought/Act/ActInput/Obs loop can repeat N times until you know the answer.)
ANS: Answer of the original question. You describe detailed benefits of each answer to user's preference.
Info used to select wine:
- type of food
- occasion
- user's personal taste of wine
- wine price range
- temperature at the serving location
- wine we have in stock
""",
),
tools::Dict=Dict(
:internetsearch=>Dict(
:name => "internetsearch",
:description => "Useful for when you need to search the Internet",
:input => "Input should be a search query.",
:output => "",
# :func => internetsearch # function
),
:chatbox=>Dict(
:name => "chatbox",
:description => "Useful for when you need to ask a customer what you need to know or to talk with them.",
:input => "Input should be a conversation to customer.",
:output => "" ,
),
:wineStock=>Dict(
:name => "wineStock",
:description => "useful for when you need to search for wine by your description, price, name or ID.",
:input => "Input should be a search query with as much details as possible.",
:output => "" ,
),
),
msgMeta::Dict=Dict(
:msgPurpose=> "updateStatus",
:from=> "chatbothub",
@@ -73,37 +141,19 @@ function agent(
),
availableRole::AbstractArray=["system", "user", "assistant"],
maxUserMsg::Int=10,)
newAgent = agent()
newAgent = agentReact()
newAgent.availableRole = availableRole
newAgent.maxUserMsg = maxUserMsg
systemMessage_ = "Your name is $agentName. " * systemMessage
push!(newAgent.messages, Dict(:role=>"system", :content=> systemMessage_, :timestamp=> Dates.now()))
newAgent.mqttClient = CommUtils.mqttClient(mqttClientSpec)
newAgent.msgMeta = msgMeta
newAgent.tools = tools
newAgent.role = role
newAgent.roles = roles
return newAgent
end
# @kwdef mutable struct agentLangchain
# availableRole=["system", "user", "assistant"]
# maxUserMsg::Int= 10
# llmAIRequestTopic_openblas = "llm/openblas/request"
# llmAIRequestTopic_gpu = "llm/api/v0.0.1/gpu/request"
# self_llmReceiveTopic = "chatbothub/llm/respond"
# """ Dict(Role=> Content) ; Role can be system, user, assistant
# Example:
# messages=[
# Dict(:role=>"system", :content=> "You are a helpful assistant."),
# Dict(:role=>"assistant", :content=> "How may I help you"),
# Dict(:role=>"user", :content=> "Hello, how are you"),
# ]
# """
# # Ref: https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGML/discussions/3
# #
# messages=[Dict(:role=>"system", :content=> "You are a helpful assistant.", :timestamp=> Dates.now()),]
# end
"""
add new message to agent
@@ -112,7 +162,7 @@ end
julia> addNewMessage(agent1, "user", "Where should I go to buy snacks")
````
"""
function addNewMessage(a::agent, role::String, content::String)
function addNewMessage(a::T, role::String, content::String) where {T<:agent}
if role a.availableRole # guard against typo
error("role is not in agent.availableRole")
end
@@ -139,8 +189,7 @@ function addNewMessage(a::agent, role::String, content::String)
return messageleft
end
function clearMessage(a::agent)
function clearMessage(a::T) where {T<:agent}
for i in eachindex(a.messages)
if length(a.messages) > 1 # system instruction will NOT be deleted
pop!(a.messages)
@@ -150,27 +199,87 @@ function clearMessage(a::agent)
end
end
function removeLatestMsg(a::agent)
# Drop the newest message from the agent's history; the first entry (the system
# instruction) is never removed. Returns the popped message, or `nothing` when
# only the system message remains.
function removeLatestMsg(a::T) where {T<:agent}
    length(a.messages) > 1 || return nothing
    return pop!(a.messages)
end
function generatePrompt_tokenSuffix(a::agent;
userToken::String="[/INST]", assistantToken="[INST]",
systemToken="[INST]<<SYS>> content <</SYS>>")
prompt = nothing
for msg in a.messages
# function generatePrompt_tokenSuffix(a::agentReact;
# userToken::String="[/INST]", assistantToken="[INST]",
# systemToken="[INST]<<SYS>> content <</SYS>>")
# prompt = nothing
# for msg in a.messages
# role = msg[:role]
# content = msg[:content]
# if role == "system"
# prompt = replace(systemToken, "content" => content) * " "
# elseif role == "user"
# prompt *= " " * content * " " * userToken
# elseif role == "assistant"
# prompt *= " " * content * " " * assistantToken
# else
# error("undefined condition role = $role")
# end
# end
# return prompt
# end
# function generatePrompt_tokenPrefix(a::agentReact;
# userToken::String="Q:", assistantToken="A:",
# systemToken="[INST]<<SYS>> content <</SYS>>")
# prompt = nothing
# for msg in a.messages
# role = msg[:role]
# content = msg[:content]
# if role == "system"
# prompt = replace(systemToken, "content" => content) * " "
# elseif role == "user"
# prompt *= userToken * " " * content * " "
# elseif role == "assistant"
# prompt *= assistantToken * " " * content * " "
# else
# error("undefined condition role = $role")
# end
# end
# return prompt
# end
"""
    generatePrompt_react_mistral_openorca(messages, systemMsg, context="", tools=nothing)

Build a ChatML (mistral-openorca style) ReAct prompt from a message history.
A "system" entry triggers rendering of the header template (system prompt,
tool list, context); "user"/"assistant" entries are appended as ChatML turns.

# Arguments
- `messages::AbstractVector`: history entries, each a Dict with `:role` and `:content`.
- `systemMsg::String`: text substituted for `{systemMsg}`.
- `context::String`: text substituted for `{context}`.
- `tools::Union{Dict, Nothing}`: toolname => Dict(:name, :description, :input, :output);
  one line per tool replaces `{tools}`. `nothing` renders an empty tool list.

# Returns
The assembled prompt `String`.
"""
function generatePrompt_react_mistral_openorca(messages::AbstractVector, systemMsg::String,
    context::String="", tools::Union{Dict, Nothing}=nothing)
    # NOTE: the old signature declared `messages::Dict`, but the body indexes each
    # element with msg[:role], which only works for a vector of Dicts.
    promptTemplate =
"""
<|im_start|>system
{systemMsg}
You have access to the following tools:
{tools}
Begin!
<|im_end|>
Here are the context for the question:
{context}
"""
    prompt = "" # start empty so a history without a leading system message still works
    for msg in messages
        role = msg[:role]
        content = msg[:content]
        if role == "system"
            # Bug fix: substitutions must chain on `prompt`; the previous code
            # re-replaced `promptTemplate` each time, discarding earlier substitutions.
            prompt = replace(promptTemplate, "{systemMsg}" => systemMsg)
            toollines = ""
            if tools !== nothing
                # Iterate the tool specs (Dict values), not the (key => value) pairs.
                for tool in values(tools)
                    toollines *= "$(tool[:name]): $(tool[:description]) $(tool[:input]) $(tool[:output])\n"
                end
            end
            prompt = replace(prompt, "{tools}" => toollines)
            prompt = replace(prompt, "{context}" => context)
        elseif role == "user"
            prompt *= "<|im_start|>user\n" * content * "\n<|im_end|>\n"
        elseif role == "assistant"
            prompt *= "<|im_start|>assistant\n" * content * "\n<|im_end|>\n"
        else
            error("undefined condition role = $role")
        end
    end
    return prompt
end
function generatePrompt_tokenPrefix(a::agent;
userToken::String="Q:", assistantToken="A:",
systemToken="[INST]<<SYS>> content <</SYS>>")
prompt = nothing
for msg in a.messages
role = msg[:role]
content = msg[:content]
if role == "system"
prompt = replace(systemToken, "content" => content) * " "
elseif role == "user"
prompt *= userToken * " " * content * " "
elseif role == "assistant"
prompt *= assistantToken * " " * content * " "
else
error("undefied condition role = $role")
end
"""
    generatePrompt_react_mistral_openorca(a, usermsg)

Render the ReAct ChatML header for agent `a` (active role prompt, tool list,
stored context), then append `usermsg` as a "QTS:"-prefixed user turn and open
the assistant turn.
"""
function generatePrompt_react_mistral_openorca(a::T, usermsg::String) where {T<:agent}
    template =
"""
<|im_start|>system
{systemMsg}
You have access to the following tools:
{tools}
Begin!
<|im_end|>
Here are the context for the question:
{context}
"""
    # One line per registered tool: "name: description input output".
    toollines = ""
    for (name, spec) in a.tools
        toollines *= "$name: $(spec[:description]) $(spec[:input]) $(spec[:output])\n"
    end
    header = replace(template, "{systemMsg}" => a.roles[a.role])
    header = replace(header, "{tools}" => toollines)
    header = replace(header, "{context}" => a.context)
    return header * "<|im_start|>user\nQTS: " * usermsg * "\n<|im_end|>\n" * "<|im_start|>assistant\n"
end
function conversation(a::agent, usermsg::String)
addNewMessage(a, "user", usermsg)
userIntent = identifyUserIntention(a, usermsg)
@show userIntent
#WORKING 1) add if-else user intention logic. 2) add recursive thinking
if userIntent == "chat"
generatePrompt_tokenPrefix(a, userToken="Q:", assistantToken="A:")
result = sendReceivePrompt(a, usermsg)
addNewMessage(a, "assistant", result)
return result
elseif userIntent == "task"
#WORKING
"""
    conversation(a, usermsg)

WIP entry point for one conversational turn of a ReAct agent. When no thought
is pending (`a.thought == "nothing"`), it refreshes `a.context` from the
conversation summary, records `usermsg`, and builds the ReAct prompt; the LLM
round-trip / ReAct loop is not implemented yet (stops with an error).
"""
function conversation(a::T, usermsg::String) where {T<:agent}
    if a.thought == "nothing" # string sentinel: no unfinished thought pending
        a.context = conversationSummary(a)
        addNewMessage(a, "user", usermsg)
        prompt = generatePrompt_react_mistral_openorca(a, usermsg)
        @show prompt
        error("conversation done") # WIP marker: stop after prompt construction
    else
        # Bug fix: the previous branch interpolated an undefined `userIntent`
        # variable, raising UndefVarError instead of the intended error.
        error("continuing an unfinished thought is not implemented $(@__LINE__)")
    end
end
#WORKING
# TODO(WIP): intended to run one ReAct plan/act/observe step for `usermsg`;
# see the commented-out `work2` sketch further down for the planned algorithm.
function work(a::T, usermsg::String) where {T<:agent}
end
# TODO(WIP): intended to resume a pending ReAct thought (`a.thought`) with a new
# observation carried in `usermsg`. Currently a stub.
function workContinueThought(a::T, usermsg::String) where {T<:agent}
end
#WORKING
"""
make a conversation summary.
```julia
julia> conversation = [
Dict(:role=> "user", :content=> "I would like to get a bottle of wine", :timestamp=> Dates.now()),
Dict(:role=> "assistant", :content=> "What kind of Thai dishes are you having?", :timestamp=> Dates.now()),
Dict(:role=> "user", :content=> "It a pad thai.", :timestamp=> Dates.now()),
Dict(:role=> "assistant", :content=> "Is there any special occasion for this event?", :timestamp=> Dates.now()),
Dict(:role=> "user", :content=> "We'll hold a wedding party at the beach.", :timestamp=> Dates.now()),
Dict(:role=> "assistant", :content=> "What is your preferred type of wine?", :timestamp=> Dates.now()),
Dict(:role=> "user", :content=> "I like dry white wine with medium tanins.", :timestamp=> Dates.now()),
Dict(:role=> "assistant", :content=> "What is your preferred price range for this bottle of wine?", :timestamp=> Dates.now()),
Dict(:role=> "user", :content=> "lower than 50 dollars.", :timestamp=> Dates.now()),
Dict(:role=> "assistant", :content=> "Based on your preferences and our stock, I recommend the following two wines for you:
1. Pierre Girardin \"Murgers des Dents de Chien\" - Saint-Aubin 1er Cru (17 USD)
2. Etienne Sauzet'Les Perrieres' - Puligny Montrachet Premier Cru (22 USD)
The first wine, Pierre Girardin \"Murgers des Dents de Chien\" - Saint-Aubin 1er Cru, is a great choice for its affordable price and refreshing taste.
It pairs well with Thai dishes and will be perfect for your beach wedding party.
The second wine, Etienne Sauzet'Les Perrieres' - Puligny Montrachet Premier Cru, offers a more complex flavor profile and slightly higher price point, but still remains within your budget.
Both wines are suitable for serving at 22 C temperature.", :timestamp=> Dates.now()),
]
julia> summary = conversationSummary(agent1)
```
"""
function conversationSummary(a::T) where {T<:agent}
    promptTemplate =
"""
<|im_start|>system
You are a helpful assistant.
<|im_end|>
<|im_start|>user
Please make a detailed bullet summary of the following earlier conversation between you and the user.
{conversation}
<|im_end|>
"""
    conversation = "" # local transcript; shadows the sibling `conversation` function only inside this scope
    summary = ""
    if !isempty(a.messages)
        for msg in a.messages
            role = msg[:role]
            content = msg[:content]
            if role == "system"
                # Bug fix: the history always starts with the system message seeded by the
                # constructor; it is an instruction, not dialogue, so skip it instead of
                # hitting the error branch as the previous version did.
                continue
            elseif role == "user"
                conversation *= "$role: $content\n"
            elseif role == "assistant"
                conversation *= "I: $content\n"
            else
                error("undefined condition role = $role")
            end
        end
        prompt = replace(promptTemplate, "{conversation}" => conversation)
        result = sendReceivePrompt(a, prompt)
        summary = result === nothing ? "nothing" : result # "nothing" string sentinel on LLM timeout
        if startswith(summary, "\n")
            summary = summary[2:end] # drop a single leading newline from the LLM output
        end
    end
    return summary
end
# function work2(a::agentReact, usermsg::String)
# addNewMessage(a, "user", usermsg)
# userIntent = identifyUserIntention(a, usermsg)
# @show userIntent
# # checkReasonableness()
# if userIntent == "chat"
# prompt = generatePrompt_tokenPrefix(a, userToken="Q:", assistantToken="A:")
# result = sendReceivePrompt(a, prompt)
# addNewMessage(a, "assistant", result)
# return result
# elseif userIntent == "task"
# while true
# if thought == "nothing" # no unfinished thought
# prompt = generatePrompt_react_mistral_openorca(
# a.messages, a.roles[a.role], a.context, a.tools)
# output = sendReceivePrompt(a, prompt)
# obscount = count(output["text"], "Obs:")
# a.thought = prompt * out
# if contains(output["text"], "ANS:") # know the answer
# a.thought = "nothing"
# return output["text"]
# else
# out = split(output["text"], "Obs:")[1] # LLM may generate long respond with multiple Obs: but I do only 1 Obs: at a time(1st).
# act = react_act(out, "first")
# actinput = react_actinput(out, "first")
# toolname = toolNameBeingCalled(act, a.tools)
# if toolname == "chatbox"
# return actinput
# else # function call
# toolresult = a.tools[toolname][:func](actinput)
# Obs = "Obs: $toolresult\n" # observe in ReAct agent
# work(a, Obs)
# end
# end
# else # continue thought
# usermsg = "Obs: $usermsg"
# prompt = a.thought * usermsg
# output = sendReceivePrompt(a, prompt)
# obs = count(output["text"], "Obs:")
# out = split(output["text"], "Obs:")[1]
# a.thought = prompt * out
# if obs == 0 # llm config has too short characters generation
# error("No Obs: detected. Probably LLM config has too short max_tokens generation")
# elseif obs == 1 # first conversation
# act = react_act(out, "first")
# actinput = react_actinput(out, "first")
# toolname = toolNameBeingCalled(act, a.tools)
# if toolname == "chatbox"
# return actinput
# else # function call
# toolresult = a.tools[toolname][:func](actinput)
# Obs = "Obs: $toolresult\n" # observe in ReAct agent
# work(a, Obs)
# end
# else # later conversation
# act = react_act(out, "last")
# actinput = react_actinput(out, "last")
# toolname = toolNameBeingCalled(act, a.tools)
# if toolname == "chatbox"
# return actinput
# else # function call
# toolresult = a.tools[toolname][:func](actinput)
# Obs = "Obs: $toolresult\n" # observe in ReAct agent
# work(a, Obs)
# end
# end
# end
# else
# error("user intent $userIntent not define $(@__LINE__)")
# end
# end
# TODO(WIP): stub — intended to continue an agent's in-progress work loop.
function workContinue(a::agent)
end
#TESTING
function identifyUserIntention(a::agent, usermsg::String)
identify_usermsg =
prompt =
"""
You are to determine intention of the question.
<|im_start|>system
You are a helpful assistant. Your job is to determine intention of the question.
Your choices are:
chat: normal conversation that you don't need to do something.
task: a request for you to do something.
@@ -274,50 +547,26 @@ function identifyUserIntention(a::agent, usermsg::String)
Begin!
Here are the context for the question:
{context}
<|im_end|>
<|im_start|>user
Question: {input}
<|im_end|>
<|im_start|>assistant
"""
identify_usermsg = replace(identify_usermsg, "{input}" => usermsg)
result = sendReceivePrompt(a, identify_usermsg)
# msg = Dict(
# :msgMeta=> a.msgMeta,
# :txt=> identify_usermsg,
# )
# payloadChannel = Channel(1)
# # send prompt
# CommUtils.request(a.mqttClient, msg, pubtopic=a.mqttClient.pubtopic.llmAI)
# starttime = Dates.now()
# timeout = 10
# result = nothing
# while true
# timepass = (Dates.now() - starttime).value / 1000.0
# CommUtils.mqttRun(a.mqttClient, payloadChannel)
# if isready(payloadChannel)
# topic, payload = take!(payloadChannel)
# if payload[:msgMeta][:repondToMsgId] == msg[:msgMeta][:msgId]
# result = payload[:txt]
# break
# end
# elseif timepass <= timeout
# # skip, within waiting period
# elseif timepass > timeout
# result = nothing
# break
# else
# error("undefined condition $(@__LINE__)")
# end
# end
prompt = replace(prompt, "{input}" => usermsg)
result = sendReceivePrompt(a, prompt)
answer = result === nothing ? nothing : GeneralUtils.getStringBetweenCharacters(result, "{", "}")
return answer
end
function sendReceivePrompt(a::agent, prompt::String; timeout::Int=10)
function sendReceivePrompt(a::agent, prompt::String; timeout::Int=30)
a.msgMeta[:msgId] = "$(uuid4())" # new msg id for each msg
msg = Dict(
:msgMeta=> a.msgMeta,
@@ -336,7 +585,7 @@ function sendReceivePrompt(a::agent, prompt::String; timeout::Int=10)
if isready(payloadChannel)
topic, payload = take!(payloadChannel)
if payload[:msgMeta][:repondToMsgId] == msg[:msgMeta][:msgId]
result = payload[:txt]
result = haskey(payload, :txt) ? payload[:txt] : nothing
break
end
elseif timepass <= timeout
@@ -352,37 +601,134 @@ function sendReceivePrompt(a::agent, prompt::String; timeout::Int=10)
return result
end
# function getStringBetweenCurlyBraces(s::AbstractString)
# m = match(r"\{(.+?)\}", s)
# m = m == "" ? "" : m.captures[1]
# return m
# end
# function getStringBetweenCharacters(text::AbstractString, startChar::String, endChar::String)
# startIndex= findlast(startChar, text)
# endIndex= findlast(endChar, text)
# if startIndex === nothing || endIndex === nothing
# return nothing
# else
# return text[startIndex.stop+1: endIndex.start-1]
# end
# end
"""
    toolNameBeingCalled(act, tools)

Return the name (as a `String`) of the first registered tool whose key occurs
as a substring of the ReAct "Act:" text `act`, or `nothing` if none matches.
"""
function toolNameBeingCalled(act::String, tools::Dict)
    for key in keys(tools)
        name = String(key)
        if occursin(name, act)
            return name
        end
    end
    return nothing
end
#TODO
# TODO(WIP): reasonableness pre-check — asks the model whether `userMsg` can be
# answered with the available tools, returning the raw model text.
# NOTE(review): `llm` and `pyconvert` are not defined in this file — presumably a
# PythonCall/llama.cpp binding available elsewhere; confirm before enabling.
# NOTE(review): the `context` parameter is immediately overwritten by the
# hard-coded WineStock description below, and `tools` is never used — looks unfinished.
function checkReasonableness(userMsg::String, context::String, tools)
    # Ref: https://www.youtube.com/watch?v=XV4IBaZqbps
    prompt =
"""
<|im_start|>system
You are a helpful assistant. Your job is to check the reasonableness of user questions.
If the user question can be answered given the tools available say, "This is a reasonable question".
If the user question cannot be answered then provide some feedback to the user that may improve
their question.
Here is the context for the question:
{context}
<|im_end|>
<|im_start|>user
{question}
<|im_end|>
<|im_start|>assistant
"""
    context = "You have access to the following tools:
WineStock: useful for when you need to find info about wine by matching your description, price, name or ID. Input should be a search query with as much details as possible."
    prompt = replace(prompt, "{question}" => userMsg)
    prompt = replace(prompt, "{context}" => context)
    # Call into the (Python) model binding; stop sequences trim the response.
    output_py = llm(
        prompt,
        max_tokens=512,
        temperature=0.1,
        # top_p=top_p,
        echo=false,
        stop=["</response>", "<<END>>", ],
    )
    _output_jl = pyconvert(Dict, output_py);
    output = pyconvert(Dict, _output_jl["choices"][1]);
    output["text"] # implicit return: the model's raw text
end
# Extract the "Plan ..." section of a ReAct response: the text between the
# "Plan" and "Thought" markers, re-prefixed with "Plan".
function react_plan(text::String, firstlast="first")
    section = GeneralUtils.getStringBetweenCharacters(text, "Plan", "Thought", firstlast=firstlast)
    return "Plan" * section
end
# Extract the "Thought ..." section of a ReAct response: the text between the
# "Thought" and "Act" markers, re-prefixed with "Thought".
function react_thought(text::String, firstlast="first")
    section = GeneralUtils.getStringBetweenCharacters(text, "Thought", "Act", firstlast=firstlast)
    return "Thought" * section
end
# Extract the "Act ..." section of a ReAct response: the text between the
# "Act" and "ActInput" markers, re-prefixed with "Act".
function react_act(text::String, firstlast="first")
    section = GeneralUtils.getStringBetweenCharacters(text, "Act", "ActInput", firstlast=firstlast)
    return "Act" * section
end
# Extract the "ActInput ..." section of a ReAct response: the text between the
# "ActInput" and "Obs" markers, re-prefixed with "ActInput".
function react_actinput(text::String, firstlast="first")
    section = GeneralUtils.getStringBetweenCharacters(text, "ActInput", "Obs", firstlast=firstlast)
    return "ActInput" * section
end
"""
Detect given characters. Output is a list of named tuple of detected char.
Matching is exact (case-sensitive) and works on non-ASCII strings: `startInd`
and `endInd` are valid string indices such that `text[startInd:endInd] == char`.
```jldoctest
julia> text = "I like to eat apples and use utensils."
julia> characters = ["eat", "use", "i"]
julia> result = detectCharacters(text, characters)
4-element Vector{Any}:
(char = "i", startInd = 4, endInd = 4)
(char = "eat", startInd = 11, endInd = 13)
(char = "use", startInd = 26, endInd = 28)
(char = "i", startInd = 35, endInd = 35)
```
"""
function detectCharacters(text::T, characters::Vector{T}) where {T<:AbstractString}
    result = [] # Vector{Any} kept for backward compatibility with existing callers
    for i in eachindex(text)
        # Compare against the remainder of `text`. Using startswith avoids slicing
        # with byte-offset arithmetic, which raised StringIndexError (or silently
        # missed matches past `length(text)`) on multi-byte UTF-8 in the old version.
        rest = SubString(text, i)
        for char in characters
            if startswith(rest, char)
                # endInd = string index of the final character of the match
                # (i + length(char) - 1 is wrong for multi-byte characters).
                endInd = i
                for _ in 2:length(char)
                    endInd = nextind(text, endInd)
                end
                push!(result, (char=char, startInd=i, endInd=endInd))
            end
        end
    end
    return result
end
"""
Find a given character from a vector of named tuple.
Output is character location index inside detectedCharacters
```jldoctest
julia> a = [ (char = "i", startInd = 4, endInd = 4)
(char = "eat", startInd = 11, endInd = 13)
(char = "use", startInd = 26, endInd = 28)
(char = "i", startInd = 35, endInd = 35) ]
julia> findDetectedCharacter(a, "i")
[1, 4]
```
"""
function findDetectedCharacter(detectedCharacters, character)
    # Single pass with a predicate instead of materialising a temporary Bool
    # array (the previous `findall(isequal.(allchar, character))` built two
    # intermediate vectors just to locate the matches).
    return findall(t -> first(t) == character, detectedCharacters)
end