diff --git a/src/interface.jl b/src/interface.jl
index 1a1ac50..b91e7cc 100755
--- a/src/interface.jl
+++ b/src/interface.jl
@@ -3,7 +3,7 @@ module interface
export agentReact, addNewMessage, clearMessage, removeLatestMsg, generatePrompt_tokenPrefix,
generatePrompt_tokenSuffix, conversation, work, detectCharacters, chunktext,
- findDetectedCharacter
+ findDetectedCharacter, wikisearch
using JSON3, DataStructures, Dates, UUIDs, HTTP
using CommUtils, GeneralUtils
@@ -59,7 +59,8 @@ abstract type agent end
messages = Vector{Dict{Symbol, Any}}()
context::String = "nothing" # internal thinking area
tools::Union{Dict, Nothing} = nothing
- thought::String = "nothing" # contain unfinished thoughts
+        thought::String = "nothing" # contains unfinished thoughts; used by the ReAct agent only
+ thinkingMode::Union{Dict, Nothing} = nothing
end
function agentReact(
@@ -68,39 +69,14 @@ function agentReact(
role::Symbol=:assistant,
roles::Dict=Dict(
:assistant =>
- """
- You are a helpful assistant.
- """,
- :assistant_react =>
"""
You are a helpful assistant. You don't know other people personal info previously.
-
- Use the following format:
- QTS: the input question your user is asking and you must answer
- Plan: first you should always think about the question and the info you have thoroughly then extract and devise a complete plan to find the answer (pay attention to variables and their corresponding numerals).
- Thought: you should always think about the info you need and what to do (pay attention to correct numeral calculation and commonsense).
- Act: the action tool related to what you intend to do, should be one of {toolnames}
- ActInput: the input to the action (pay attention to the tool's input)
- Obs: the result of the action
- ..... (this Plan/Thought/Act/ActInput/Obs loop can repeat N times.)
- Thought: I think I know the answer
- ANS: Answer of the original question and the rationale behind your answer
""",
:sommelier =>
"""
You are a sommelier at an online wine reseller who always ask user for wine relevant info before you could help them choosing wine.
- You usually recommend atmost 2 wines for customers.
+ You provide a personalized recommendation of up to two wines based on the user's preference, and you describe the benefits of each wine in detail.
You don't know other people personal info previously.
-
- Use the following format:
- QTS: the input question your user is asking and you must answer
- Plan: first you should always think about the question and the info you have thoroughly then extract and devise a complete plan to find the answer (pay attention to variables and their corresponding numerals).
- Thought: ask yourself do you have all the info you need? And what to do (pay attention to correct numeral calculation and commonsense).
- Act: the tool that match your thought, should be one of {toolnames}
- ActInput: the input to the action (pay attention to the tool's input)
- Obs: the result of the action
- ..... (this Plan/Thought/Act/ActInput/Obs loop can repeat N times until you know the answer.)
- ANS: Answer of the original question. You describe detailed benefits of each answer to user's preference.
Info used to select wine:
- type of food
@@ -111,13 +87,28 @@ function agentReact(
- wine we have in stock
""",
),
+ thinkingMode::Dict=Dict(
+ :nothinking=> "",
+ :react=>
+ "Use the following format:
+ Question: the input question your user is asking and you must answer
+ Plan: first you should always think about the question and the info you have thoroughly then extract and devise a complete plan to find the answer (pay attention to variables and their corresponding numerals).
+ Thought: ask yourself do you have all the info you need? And what to do (pay attention to correct numeral calculation and commonsense).
+        Act: the tool that matches your thought, should be one of {toolnames}
+ ActInput: the input to the action (pay attention to the tool's input)
+ Obs: the result of the action
+ ... (this Plan/Thought/Act/ActInput/Obs loop can repeat N times until you know the answer.)
+ Answer: Answer of the original question.
+
+ Begin!",
+ ),
tools::Dict=Dict(
:wikisearch=>Dict(
:name => "wikisearch",
:description => "Useful for when you need to search the Internet",
- :input => "Input should be a search keywords.",
+            :input => "Input should be a keyword, not a question.",
:output => "",
- :func => nothing # put function here
+ :func => wikisearch, # put function here
),
:chatbox=>Dict(
:name => "chatbox",
@@ -164,6 +155,7 @@ function agentReact(
newAgent.tools = tools
newAgent.role = role
newAgent.roles = roles
+ newAgent.thinkingMode = thinkingMode
return newAgent
end
@@ -219,61 +211,51 @@ function removeLatestMsg(a::T) where {T<:agent}
end
end
-# function generatePrompt_tokenSuffix(a::agentReact;
-# userToken::String="[/INST]", assistantToken="[INST]",
-# systemToken="[INST]<> content <>")
-# prompt = nothing
-# for msg in a.messages
-# role = msg[:role]
-# content = msg[:content]
-
-# if role == "system"
-# prompt = replace(systemToken, "content" => content) * " "
-# elseif role == "user"
-# prompt *= " " * content * " " * userToken
-# elseif role == "assistant"
-# prompt *= " " * content * " " * assistantToken
-# else
-# error("undefied condition role = $role")
-# end
+# function generatePrompt_mistral_openorca(a::T, usermsg::String, role::Symbol) where {T<:agent}
+# prompt =
+# """
+# <|im_start|>system
+# {systemMsg}
+# <|im_end|>
+# Here are the context for the question:
+# {context}
+# """
+# prompt = replace(prompt, "{systemMsg}" => a.roles[role])
+
+# toolnames = ""
+# toollines = ""
+# for (toolname, v) in a.tools
+# toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
+# toollines *= toolline
+# toolnames *= "$toolname,"
# end
+# prompt = replace(prompt, "{toolnames}" => toolnames)
+# prompt = replace(prompt, "{tools}" => toollines)
+
+# prompt = replace(prompt, "{context}" => a.context)
+
+# prompt *= "<|im_start|>user\n" * usermsg * "\n<|im_end|>\n"
+# prompt *= "<|im_start|>assistant\n"
# return prompt
# end
-# function generatePrompt_tokenPrefix(a::agentReact;
-# userToken::String="Q:", assistantToken="A:",
-# systemToken="[INST]<> content <>")
-# prompt = nothing
-# for msg in a.messages
-# role = msg[:role]
-# content = msg[:content]
+function generatePrompt_mistral_openorca(a::T, usermsg::String,
+ thinkingMode::Symbol=:nothinking) where {T<:agent}
-# if role == "system"
-# prompt = replace(systemToken, "content" => content) * " "
-# elseif role == "user"
-# prompt *= userToken * " " * content * " "
-# elseif role == "assistant"
-# prompt *= assistantToken * " " * content * " "
-# else
-# error("undefied condition role = $role")
-# end
-# end
-
-# return prompt
-# end
-
-function generatePrompt_mistral_openorca(a::T, usermsg::String) where {T<:agent}
prompt =
"""
<|im_start|>system
{systemMsg}
+ You have access to the following tools:
+ {tools}
+ {thinkingMode}
<|im_end|>
Here are the context for the question:
{context}
"""
- prompt = replace(prompt, "{systemMsg}" => a.roles[:assistant])
-
+ prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
+ prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
toolnames = ""
toollines = ""
for (toolname, v) in a.tools
@@ -286,47 +268,8 @@ function generatePrompt_mistral_openorca(a::T, usermsg::String) where {T<:agent}
prompt = replace(prompt, "{context}" => a.context)
- prompt *= "<|im_start|>user\n" * usermsg * "\n<|im_end|>\n"
+ prompt *= "<|im_start|>user\nQuestion: " * usermsg * "\n<|im_end|>\n"
prompt *= "<|im_start|>assistant\n"
-
- return prompt
-end
-
-function generatePrompt_react_mistral_openorca(a::T, usermsg::String,
- continuethought::Bool=false) where {T<:agent}
-
- if continuethought == false
- prompt =
- """
- <|im_start|>system
- {systemMsg}
- You have access to the following tools:
- {tools}
- Begin!
- <|im_end|>
- Here are the context for the question:
- {context}
- """
- prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
-
- toolnames = ""
- toollines = ""
- for (toolname, v) in a.tools
- toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
- toollines *= toolline
- toolnames *= "$toolname,"
- end
- prompt = replace(prompt, "{toolnames}" => toolnames)
- prompt = replace(prompt, "{tools}" => toollines)
-
- prompt = replace(prompt, "{context}" => a.context)
-
- prompt *= "<|im_start|>user\nQTS: " * usermsg * "\n<|im_end|>\n"
- prompt *= "<|im_start|>assistant\n"
- else
- a.thought *= "Obs: $_result\n"
- prompt = a.thought
- end
return prompt
end
@@ -367,42 +310,42 @@ end
julia> respond = ChatAgent.conversation(newAgent, "Hi! how are you?")
```
"""
-function conversation(a::T, usermsg::String) where {T<:agent}
- userintend = identifyUserIntention(a, usermsg)
- @show userintend
+function conversation(a::T, usermsg::String) where {T<:agent} #WORKING
respond = nothing
- # AI thinking mode
- if userintend == "chat"
- a.context = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
+ if a.thought != "nothing" # continue thought
_ = addNewMessage(a, "user", usermsg)
- prompt = generatePrompt_mistral_openorca(a, usermsg)
- @show prompt
- respond = sendReceivePrompt(a, prompt)
- respond = split(respond, "<|im_end|>")[1]
- respond = replace(respond, "\n" => "")
- _ = addNewMessage(a, "assistant", respond)
- @show respond
- elseif userintend == "wine"
- if a.thought == "nothing" # new thought
+ a.thought *= "Obs: $usermsg\n"
+ prompt = a.thought
+ respond = work(a, prompt)
+ else # new thought
+ thinkingmode = chooseThinkingMode(a, usermsg)
+ @show thinkingmode
+ if thinkingmode == :nothinking
+ a.context = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
+ _ = addNewMessage(a, "user", usermsg)
+ prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
+ @show prompt
+ respond = sendReceivePrompt(a, prompt)
+ respond = split(respond, "<|im_end|>")[1]
+ respond = replace(respond, "\n" => "")
+ _ = addNewMessage(a, "assistant", respond)
+ @show respond
+ elseif thinkingmode == :react
a.context = conversationSummary(a)
_ = addNewMessage(a, "user", usermsg)
- prompt = generatePrompt_react_mistral_openorca(a, usermsg)
- respond = work(a, prompt)
- else # continue thought
- _ = addNewMessage(a, "user", usermsg)
- prompt = generatePrompt_react_mistral_openorca(a, usermsg, continuethought=true)
+ prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
respond = work(a, prompt)
+ else
+ error("undefined condition thinkingmode = $thinkingmode")
end
- else
- error("undefined condition userintend = $userintend")
end
return respond
end
"""
- Continuously run llm functions except when llm is getting ANS: or chatbox.
+    Continuously run LLM calls until the LLM produces "Answer:" or responds via the chatbox tool.
"""
function work(a::T, prompt::String) where {T<:agent}
respond = nothing
@@ -419,14 +362,23 @@ function work(a::T, prompt::String) where {T<:agent}
end
headers = detectCharacters(respond,
- ["QTS:", "Plan:", "Thought:", "Act:", "ActInput:", "Obs:", ".....", "ANS:"])
+ ["Question:", "Plan:", "Thought:", "Act:", "ActInput:", "Obs:", "...", "Answer:",
+ "Conclusion:", "Summary:"])
@show headers
chunkedtext = chunktext(respond, headers)
@show chunkedtext
+ if a.thought == "nothing"
+ a.thought = respond
+ else
+ a.thought *= respond
+ end
- if headers[1][:char] == "ANS:"
+ Answer = findDetectedCharacter(headers, "Answer:")
+ AnswerInd = length(Answer) != 0 ? Answer[1] : nothing
+ Act = findDetectedCharacter(headers, "Act:")
+ if length(Answer) == 1 && length(Act) == 0
a.thought = "nothing" # question finished, no more thought
- respond = chunkedtext[1][:body]
+ respond = chunkedtext[AnswerInd][:body]
_ = addNewMessage(a, "assistant", respond)
break
else
@@ -434,6 +386,14 @@ function work(a::T, prompt::String) where {T<:agent}
ActInd = findDetectedCharacter(headers, "Act:")[1]
toolname = toolNameBeingCalled(chunkedtext[ActInd][:body], a.tools)
toolinput = chunkedtext[ActInd+1][:body]
+ if occursin(" \"", toolinput)
+ toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " \"", "\"\n")
+ else
+ toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " ", "\n")
+ end
+ @show toolname
+ @show toolinput
+
if toolname == "chatbox" # chat with user
a.thought *= toolinput
@@ -441,16 +401,20 @@ function work(a::T, prompt::String) where {T<:agent}
_ = addNewMessage(a, "assistant", respond)
break
else # function call
- error("function call")
f = a.tools[Symbol(toolname)][:func]
_result = f(toolinput)
+ if _result != "No info available." #TODO for use with wikisearch(). Not good for other tools
+ _result = makeSummary(a, _result)
+ end
result = "Obs: $_result\n"
+ # result = "Obs: I found the following info. AMD is a semiconductor company making a CPU\n" #TESTING
+ @show result
a.thought *= result
prompt = a.thought
end
end
end
-
+ @show respond
return respond
end
@@ -510,98 +474,77 @@ function conversationSummary(a::T) where {T<:agent}
end
prompt = replace(prompt, "{conversation}" => conversation)
prompt = replace(prompt, "{context}" => a.context)
- println("<<<<<")
- @show prompt
result = sendReceivePrompt(a, prompt)
summary = result === nothing ? "nothing" : result
summary = replace(summary, "<|im_end|>" => "")
if summary[1:1] == "\n"
summary = summary[2:end]
end
- @show summary
- println(">>>>>")
end
return summary
end
-# function work2(a::agentReact, usermsg::String)
-# addNewMessage(a, "user", usermsg)
-# userIntent = identifyUserIntention(a, usermsg)
-# @show userIntent
+function makeSummary(a::T1, input::T2) where {T1<:agent, T2<:AbstractString}
+ prompt =
+ """
+ <|im_start|>system
+ You are a helpful assistant. Your job is to make a concise summary of a given text.
+ <|im_end|>
+
+ <|im_start|>user
+ {input}
+ <|im_end|>
+ <|im_start|>assistant
-# # checkReasonableness()
+ """
+ prompt = replace(prompt, "{input}" => input)
+ @show prompt
+ result = sendReceivePrompt(a, prompt)
+ summary = result === nothing ? "nothing" : result
+ summary = replace(summary, "<|im_end|>" => "")
+ if summary[1:1] == "\n"
+ summary = summary[2:end]
+ end
-# if userIntent == "chat"
-# prompt = generatePrompt_tokenPrefix(a, userToken="Q:", assistantToken="A:")
-# result = sendReceivePrompt(a, prompt)
-# addNewMessage(a, "assistant", result)
+ return summary
+end
-# return result
-# elseif userIntent == "task"
-# while true
-# if thought == "nothing" # no unfinished thought
-# prompt = generatePrompt_react_mistral_openorca(
-# a.messages, a.roles[a.role], a.context, a.tools)
-# output = sendReceivePrompt(a, prompt)
-# obscount = count(output["text"], "Obs:")
-# a.thought = prompt * out
-# if contains(output["text"], "ANS:") # know the answer
-# a.thought = "nothing"
-# return output["text"]
-# else
-# out = split(output["text"], "Obs:")[1] # LLM may generate long respond with multiple Obs: but I do only 1 Obs: at a time(1st).
-# act = react_act(out, "first")
-# actinput = react_actinput(out, "first")
-# toolname = toolNameBeingCalled(act, a.tools)
-# if toolname == "chatbox"
-# return actinput
-# else # function call
-# toolresult = a.tools[toolname][:func](actinput)
-# Obs = "Obs: $toolresult\n" # observe in ReAct agent
-# work(a, Obs)
-# end
-# end
-# else # continue thought
-# usermsg = "Obs: $usermsg"
-# prompt = a.thought * usermsg
-# output = sendReceivePrompt(a, prompt)
-# obs = count(output["text"], "Obs:")
-# out = split(output["text"], "Obs:")[1]
-# a.thought = prompt * out
-# if obs == 0 # llm config has too short characters generation
-# error("No Obs: detected. Probably LLM config has too short max_tokens generation")
-# elseif obs == 1 # first conversation
-# act = react_act(out, "first")
-# actinput = react_actinput(out, "first")
-# toolname = toolNameBeingCalled(act, a.tools)
-# if toolname == "chatbox"
-# return actinput
-# else # function call
-# toolresult = a.tools[toolname][:func](actinput)
-# Obs = "Obs: $toolresult\n" # observe in ReAct agent
-# work(a, Obs)
-# end
-# else # later conversation
-# act = react_act(out, "last")
-# actinput = react_actinput(out, "last")
-# toolname = toolNameBeingCalled(act, a.tools)
-# if toolname == "chatbox"
-# return actinput
-# else # function call
-# toolresult = a.tools[toolname][:func](actinput)
-# Obs = "Obs: $toolresult\n" # observe in ReAct agent
-# work(a, Obs)
-# end
-# end
-# end
-# else
-# error("user intent $userIntent not define $(@__LINE__)")
-# end
-# end
+function chooseThinkingMode(a::T, usermsg::String) where {T<:agent}
+ prompt =
+ """
+ <|im_start|>system
+ {systemMsg}
+ You have access to the following tools:
+ {tools}
+    You need to determine now whether you will use tools or actions to answer the question.
-function workContinue(a::agent)
-
+ You have the following choices:
+    If you don't need tools or actions to answer the question, say "{no}".
+    If you need tools or actions to answer the question, say "{yes}".
+ <|im_end|>
+
+ <|im_start|>user
+ {input}
+ <|im_end|>
+ <|im_start|>assistant
+
+ """
+ toollines = ""
+ for (toolname, v) in a.tools
+ if toolname ∉ ["chatbox", "nothing"]
+ toolline = "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
+ toollines *= toolline
+ end
+ end
+ prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
+ prompt = replace(prompt, "{tools}" => toollines)
+ prompt = replace(prompt, "{input}" => usermsg)
+ result = sendReceivePrompt(a, prompt)
+ willusetools = GeneralUtils.getStringBetweenCharacters(result, "{", "}")
+ thinkingMode = willusetools == "yes" ? :react : :nothinking
+
+ return thinkingMode
end
@@ -627,7 +570,6 @@ function identifyUserIntention(a::T, usermsg::String) where {T<:agent}
"""
prompt = replace(prompt, "{context}" => "")
prompt = replace(prompt, "{input}" => usermsg)
-
result = sendReceivePrompt(a, prompt)
answer = result === nothing ? nothing : GeneralUtils.getStringBetweenCharacters(result, "{", "}")
@@ -670,7 +612,7 @@ end
)
```
"""
-function sendReceivePrompt(a::T, prompt::String; timeout::Int=30) where {T<:agent}
+function sendReceivePrompt(a::T, prompt::String; timeout::Int=60) where {T<:agent}
a.msgMeta[:msgId] = "$(uuid4())" # new msg id for each msg
msg = Dict(
:msgMeta=> a.msgMeta,
@@ -695,6 +637,7 @@ function sendReceivePrompt(a::T, prompt::String; timeout::Int=30) where {T<:agen
elseif timepass <= timeout
# skip, within waiting period
elseif timepass > timeout
+ println("sendReceivePrompt timeout $timepass/$timeout")
result = nothing
break
else
@@ -890,7 +833,7 @@ function chunktext(text::T, headers) where {T<:AbstractString}
end
-function wikisearch()
+function wikisearch(phrase::T) where {T<:AbstractString}
url = "https://en.wikipedia.org/w/api.php?action=query&format=json&prop=extracts&titles=$(replace(phrase, " " => "%20"))&exintro=1&explaintext=1"
response = HTTP.get(url)
json_data = JSON3.read(String(response.body))
@@ -898,8 +841,13 @@ function wikisearch()
if page_id == "-1"
return "Sorry, I couldn't find any Wikipedia page for the given phrase."
end
-
- return json_data["query"]["pages"][page_id]["extract"]
+
+ result = "No info available."
+ try
+ result = json_data["query"]["pages"][page_id]["extract"]
+ catch
+ end
+ return result
end
@@ -909,20 +857,6 @@ end
-
-
-
-
-
-
-
-
-
-
-
-
-
-