Compare commits

..

10 Commits

Author SHA1 Message Date
narawat lamaiin
f1613245bb update 2024-06-29 20:06:44 +07:00
narawat lamaiin
5d091a0b26 update 2024-04-28 08:54:59 +07:00
739944effd update 2024-04-24 11:30:38 +07:00
narawat lamaiin
7b79743ef5 update 2024-04-24 06:58:55 +07:00
narawat lamaiin
b3161e152e update 2024-04-24 06:38:59 +07:00
narawat lamaiin
3b4565cb39 update 2024-04-23 23:34:12 +07:00
narawat lamaiin
c2e3d1bae5 update 2024-04-23 23:24:51 +07:00
narawat lamaiin
9f5efb2948 update 2024-04-23 17:26:34 +07:00
narawat lamaiin
d833d5d22e update 2024-04-23 10:58:34 +07:00
narawat lamaiin
3b87f53af2 update 2024-04-23 06:51:09 +07:00
4 changed files with 317 additions and 97 deletions

View File

@@ -20,7 +20,7 @@ module ChatAgent
using .interface
#------------------------------------------------------------------------------------------------100
# ---------------------------------------------- 100 --------------------------------------------- #
""" version 0.0.5
Todo:

View File

@@ -47,15 +47,16 @@ using ..type, ..utils, ..llmfunction
julia> addNewMessage(agent1, "user", "Where should I go to buy snacks")
```
"""
function addNewMessage(a::T1, role::String, content::T2) where {T1<:agent, T2<:AbstractString}
if role ∉ a.availableRole # guard against typo
error("role is not in agent.availableRole $(@__LINE__)")
function addNewMessage(a::T1, name::String, content::T2) where {T1<:agent, T2<:AbstractString}
if name ∉ a.availableRole # guard against typo
error("name is not in agent.availableRole $(@__LINE__)")
end
# check whether user messages exceed limit
userMsg = 0
for i in a.messages
if i[:role] == "user"
if i[:name] == "user"
userMsg += 1
end
end
@@ -66,7 +67,7 @@ function addNewMessage(a::T1, role::String, content::T2) where {T1<:agent, T2<:A
messageleft = a.maxUserMsg
else
userMsg += 1
d = Dict(:role=> role, :content=> content, :timestamp=> Dates.now())
d = Dict(:name=> name, :text=> content, :timestamp=> Dates.now())
push!(a.messages, d)
messageleft = a.maxUserMsg - userMsg
end
@@ -95,7 +96,66 @@ function removeLatestMsg(a::T) where {T<:agent}
end
end
function chat_mistral_openorca(a::agentReflex)
# function chat_mistral_openorca(a::agentReflex, prompttemplate="llama3")
# """
# general prompt format:
# "
# <|system|>
# {role}
# {tools}
# {thinkingFormat}
# {context}
# <|im_end|>
# <|im_start|>user
# {usermsg}
# <|im_end|>
# <|im_start|>assistant
# "
# Note:
# {context} =
# "
# {earlierConversation}
# {env state}
# {shortterm memory}
# {longterm memory}
# "
# """
# conversation = messagesToString(a.messages)
# aboutYourself =
# """
# Your name is $(a.name)
# $(a.roles[a.role])
# """
# prompt =
# """
# <|system|>
# <About yourself>
# $aboutYourself
# </About yourself>
# </s>
# $conversation
# <|assistant|>
# """
# response = sendReceivePrompt(a, prompt, a.config[:text2textchat][:mqtttopic],
# timeout=180, stopword=["<|", "</"])
# response = split(response, "<|")[1]
# response = split(response, "</")[1]
# return response
# end
function chat_mistral_openorca(a::agentReflex, prompttemplate="llama3")
"""
general prompt format:
@@ -122,30 +182,14 @@ function chat_mistral_openorca(a::agentReflex)
{longterm memory}
"
"""
conversation = messagesToString(a.messages)
aboutYourself =
"""
Your name is $(a.name)
$(a.roles[a.role])
"""
conversation = formatLLMtext(a.messages, "llama3instruct")
prompt =
"""
<|system|>
<About yourself>
$aboutYourself
</About yourself>
</s>
$conversation
<|assistant|>
"""
response = sendReceivePrompt(a, prompt, a.config[:text2text][:mqtttopic],
timeout=180, stopword=["<|", "</"])
response = split(response, "<|")[1]
response = split(response, "</")[1]
_response = sendReceivePrompt(a, prompt, a.config[:text2textchat][:mqtttopic],
max_tokens=1024, timeout=180,)
response = _response
return response
end
@@ -193,14 +237,14 @@ function planner_mistral_openorca(a::agentReflex)
# skip objective and plan because LLM is going to generate new plan
shorttermMemory = dictToString(a.memory[:shortterm], skiplist=["Objective:", "Plan 1:"])
@show "---> 2"
aboutYourself =
"""
Your name is $(a.name)
$(a.roles[a.role])
$(a.roleSpecificInstruction[a.role])
"""
@show "---> 3"
# assistant_plan_prompt =
# """
# <s>
@@ -269,7 +313,7 @@ function planner_mistral_openorca(a::agentReflex)
"""
response = sendReceivePrompt(a, prompt, a.config[:text2text][:mqtttopic], max_tokens=512,
timeout=180, stopword=["<|user|>", "</"])
timeout=180, stopword=["<|eot_id|>"])
response = split(response, "<|")[1]
response = split(response, "</")[1]
@@ -1228,32 +1272,27 @@ function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)
a.attemptlimit = attemptlimit
workstate = nothing
response = nothing
_ = addNewMessage(a, "user", usermsg)
isuseplan = isUsePlans(a)
# newinfo = extractinfo(a, usermsg)
# a.env = newinfo !== nothing ? updateEnvState(a, newinfo) : a.env
@show isuseplan
if isuseplan # use plan before responding
if haskey(a.memory[:shortterm], "User:") == false #TODO should change role if user want to buy wine.
a.memory[:shortterm]["User:"] = usermsg
end
workstate, response = work(a)
end
# if LLM using askbox, use returning msg form askbox as conversation response
if workstate == "askbox" || workstate == "formulatedUserResponse"
#TODO paraphrase msg so that it is human friendlier word.
else
response = chat_mistral_openorca(a)
response = split(response, "\n\n")[1]
response = split(response, "\n\n")[1]
end
response = removeTrailingCharacters(response)
@show rawResponse = response
response = replace(response, '\n'=>"[br]") # Ken's LINE service use [br] instead of "\n"
# response = removeTrailingCharacters(response)
_ = addNewMessage(a, "assistant", response)
return response
end
@@ -1275,7 +1314,7 @@ function work(a::agentReflex)
latestTask = shortMemLatestTask(a.memory[:shortterm])
if haskey(a.memory[:shortterm], "Act $latestTask:")
if occursin("askbox", a.memory[:shortterm]["Act $latestTask:"])
a.memory[:shortterm]["Obs $latestTask:"] = "(user response) " * a.messages[end][:content]
a.memory[:shortterm]["Obs $latestTask:"] = "(user response) " * a.messages[end][:text]
end
end
end

View File

@@ -229,6 +229,10 @@ function agentReflex(
roles = roles,
roleSpecificInstruction = roleSpecificInstruction,
)
systemChatMsg = Dict(:name=> "system", :text=> "You are a helpful, respectful and honest assistant.",)
push!(newAgent.messages, systemChatMsg)
return newAgent

View File

@@ -5,7 +5,8 @@ export sendReceivePrompt, chunktext, extractStepFromPlan, checkTotalTaskInPlan,
isUsePlans, conversationSummary, checkReasonableness, replaceHeaders,
addShortMem!, splittext, dictToString, removeHeaders, keepOnlyKeys, experience,
messagesToString, messagesToString_nomark, removeTrailingCharacters, shortMemLatestTask,
keywordMemoryUpdate!
keywordMemoryUpdate!, formatLLMtext_llama3instruct, formatLLMtext_phi3instruct,
formatLLMtext
using UUIDs, Dates, DataStructures, HTTP, MQTTClient, JSON3
using GeneralUtils
@@ -53,7 +54,6 @@ function sendReceivePrompt(a::T1, prompt::String, sendtopic::String;
max_tokens::Integer=256, timeout::Integer=120, temperature::AbstractFloat=0.2,
stopword::T2=["nostopwordyet"],
seed=nothing) where {T1<:agent, T2<:Vector{<:AbstractString}}
# copy a.msgMeta instead of using GeneralUtils.generate_msgMeta because if I want to custom
# msgMeta for some communication I can do it during that agent instantiation and the custom
# msgMeta will effect all of the communication in that agent without effecting all agent
@@ -62,23 +62,26 @@ function sendReceivePrompt(a::T1, prompt::String, sendtopic::String;
msgMeta[:sendTopic] = sendtopic
msgMeta[:senderName] = "agent-wine-backend"
msgMeta[:senderId] = a.id
msgMeta[:receiverName] = "text2text"
msgMeta[:receiverName] = "text2textchat"
msgMeta[:replyTopic] = a.config[:receiveinternal][:mqtttopic]
msgMeta[:msgId] = string(uuid4())
msgMeta[:mqttServerInfo][:broker] = "mqtt.yiem.cc"
outgoing_msg = Dict(
:msgMeta=> msgMeta,
:text=> prompt,
:max_tokens=> max_tokens,
:temperature=> temperature,
:stopword=> stopword,
:seed=> seed,
:payload=> Dict(
:text=> prompt,
:max_tokens=> max_tokens,
:temperature=> temperature,
:stopword=> stopword,
:seed=> seed,
),
)
# send prompt
@show outgoing_msg
publish(a.mqttClient, outgoing_msg[:msgMeta][:sendTopic],
JSON3.write(outgoing_msg))
JSON3.write(outgoing_msg))
starttime = Dates.now()
result = nothing
@@ -86,9 +89,10 @@ function sendReceivePrompt(a::T1, prompt::String, sendtopic::String;
while true
timepass = GeneralUtils.time_difference(starttime, Dates.now(), "seconds")
if isready(a.mqttMsg_internal)
payload = take!(a.mqttMsg_internal)
if payload[:msgMeta][:replyToMsgId] == outgoing_msg[:msgMeta][:msgId]
result = haskey(payload, :text) ? payload[:text] : nothing
incomingMsg = take!(a.mqttMsg_internal)
incomingPayload = incomingMsg[:payload]
if incomingMsg[:msgMeta][:replyToMsgId] == outgoing_msg[:msgMeta][:msgId]
result = haskey(incomingPayload, :text) ? incomingPayload[:text] : nothing
break
end
elseif timepass <= timeout
@@ -376,50 +380,6 @@ end
2. objective # what LLM going to do
"""
function isUsePlans(a::agentReflex)
toollines = ""
for (toolname, v) in a.tools
if toolname ∉ ["chatbox"] # LLM will always use chatbox
toolline = "$toolname is $(v[:description])\n"
toollines *= toolline
end
end
conversation = messagesToString(a.messages)
aboutYourself =
"""
Your name is $(a.name)
$(a.roles[a.role])
"""
prompt =
"""
<|system|>
<About yourself>
$aboutYourself
</About yourself>
<You have access to the following tools>
$toollines
</You have access to the following tools>
<Your earlier conversation with the user>
$conversation
</Your earlier conversation with the user>
<Your job>
Your job is to decide whether you need think thoroughly or use tools in order to respond to the user.
Use the following format:
Thought: Do you need to think thoroughly or use tools before responding to the user?
</Your job>
<Example 1>
user: Hello!. How are you?
assistant: The user is greeting me, I don't need to think about it.
</Example 1>
<Example 2>
user: "What's tomorrow weather like?"
assistant: I will need to use weather tools to check for tomorrow's temperature.
</Example 2>
</s>
<|assistant|>
"""
isuseplan = false
@show a.role
if length(a.memory[:shortterm]) != 0
@@ -429,8 +389,53 @@ function isUsePlans(a::agentReflex)
elseif a.role == :sommelier
isuseplan = true
else
toollines = ""
for (toolname, v) in a.tools
if toolname ∉ ["chatbox"] # LLM will always use chatbox
toolline = "$toolname is $(v[:description])\n"
toollines *= toolline
end
end
conversation = messagesToString(a.messages)
aboutYourself =
"""
Your name is $(a.name)
$(a.roles[a.role])
"""
prompt =
"""
<|system|>
<About yourself>
$aboutYourself
</About yourself>
<You have access to the following tools>
$toollines
</You have access to the following tools>
<Your earlier conversation with the user>
$conversation
</Your earlier conversation with the user>
<Your job>
Your job is to decide whether you need think thoroughly or use tools in order to respond to the user.
Use the following format:
Thought: Do you need to think thoroughly or use tools before responding to the user?
</Your job>
<Example 1>
user: Hello!. How are you?
assistant: The user is greeting me, I don't need to think about it.
</Example 1>
<Example 2>
user: "What's tomorrow weather like?"
assistant: I will need to use weather tools to check for tomorrow's temperature.
</Example 2>
</s>
<|assistant|>
"""
# if LLM mentions any tools, use Plan/Thought/Act loop
response = sendReceivePrompt(a, prompt, a.config[:text2text][:mqtttopic], max_tokens=64,
response = sendReceivePrompt(a, prompt, a.config[:text2textchat][:mqtttopic], max_tokens=64,
timeout=180, stopword=["<|", "</"])
response = split(response, "<|")[1]
if occursin("yes", String(response))
@@ -544,6 +549,29 @@ function messagesToString(messages::AbstractVector{T}; addressAIas="assistant")
return conversation
end
"""
    formatLLMtext_llama3(name, text, isbegintext=false)

Format one chat turn in the Llama 3 chat layout: a header carrying the
speaker `name`, the message `text`, and an end-of-turn marker. When
`isbegintext` is `true` the turn is additionally prefixed with the
begin-of-text marker that opens a prompt.
"""
function formatLLMtext_llama3(name::T, text::T, isbegintext::Bool=false) where {T<:AbstractString}
    # A single turn: header line, message body, end-of-turn token.
    turn = "<|start_header_id|>$name<|end_header_id|>\n$text\n<|eot_id|>\n"
    # Only the very first turn of a prompt carries the begin-of-text marker.
    return isbegintext ? "<|begin_of_text|>\n" * turn : turn
end
# TODO(review): unimplemented stub — judging by the signature it was meant to
# format a single (name, text) message according to `templateName`, but the
# body is empty so it currently always returns `nothing`. Confirm intent or
# remove.
function messagesToString(name::T, text::T, templateName::T) where {T<:AbstractString}
end
# function messagesToString(messages::AbstractVector{T}; addressAIas="assistant") where {T<:AbstractDict}
# conversation = ""
# if length(messages)!= 0
@@ -680,7 +708,7 @@ function removeTrailingCharacters(text; charTobeRemoved::AbstractVector{T}=['\n'
end
end
return text[1:end-nouse]
return String(text[1:end-nouse])
end
@@ -1058,8 +1086,157 @@ function checkSimilarKey(dict::AbstractDict, key::AbstractString)
end
""" Convert a single chat dictionary into LLM model instruct format.
# Llama 3 instruct format example
<|system|>
You are a helpful AI assistant.<|end|>
<|user|>
I am going to Paris, what should I see?<|end|>
<|assistant|>
Paris, the capital of France, is known for its stunning architecture, art museums."<|end|>
<|user|>
What is so great about #1?<|end|>
<|assistant|>
# Arguments
- `name::T`
message owner name e.f. "system", "user" or "assistant"
- `text::T`
# Return
- `formattedtext::String`
text formatted to model format
# Example
```jldoctest
julia> using Revise
julia> using YiemAgent
julia> d = Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",)
julia> formattedtext = YiemAgent.formatLLMtext_phi3instruct(d[:name], d[:text])
```
Signature
"""
function formatLLMtext_phi3instruct(name::T, text::T) where {T<:AbstractString}
formattedtext =
"""
<|$name|>
$text<|end|>\n
"""
return formattedtext
end
""" Convert a single chat dictionary into LLM model instruct format.
# Llama 3 instruct format example
<|begin_of_text|>
<|start_header_id|>system<|end_header_id|>
You are a helpful assistant.
<|eot_id|>
<|start_header_id|>user<|end_header_id|>
Get me an icecream.
<|eot_id|>
<|start_header_id|>assistant<|end_header_id|>
Go buy it yourself at 7-11.
<|eot_id|>
# Arguments
- `name::T`
message owner name e.f. "system", "user" or "assistant"
- `text::T`
# Return
- `formattedtext::String`
text formatted to model format
# Example
```jldoctest
julia> using Revise
julia> using YiemAgent
julia> d = Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",)
julia> formattedtext = YiemAgent.formatLLMtext_llama3instruct(d[:name], d[:text])
"<|begin_of_text|>\n <|start_header_id|>system<|end_header_id|>\n You are a helpful, respectful and honest assistant.\n <|eot_id|>\n"
```
Signature
"""
function formatLLMtext_llama3instruct(name::T, text::T) where {T<:AbstractString}
formattedtext =
if name == "system"
"""
<|begin_of_text|>
<|start_header_id|>$name<|end_header_id|>
$text
<|eot_id|>\n
"""
else
"""
<|start_header_id|>$name<|end_header_id|>
$text
<|eot_id|>\n
"""
end
return formattedtext
end
""" Convert a chat messages in vector of dictionary into LLM model instruct format.
# Arguments
- `messages::Vector{Dict{Symbol, T}}`
message owner name e.f. "system", "user" or "assistant"
- `formatname::T`
format name to be used
# Return
- `formattedtext::String`
text formatted to model format
# Example
```jldoctest
julia> using Revise
julia> using YiemAgent
julia> chatmessage = [
Dict(:name=> "system",:text=> "You are a helpful, respectful and honest assistant.",),
Dict(:name=> "user",:text=> "list me all planets in our solar system.",),
Dict(:name=> "assistant",:text=> "I'm sorry. I don't know. You tell me.",),
]
julia> formattedtext = YiemAgent.formatLLMtext(chatmessage, "llama3instruct")
"<|begin_of_text|>\n <|start_header_id|>system<|end_header_id|>\n You are a helpful, respectful and honest assistant.\n <|eot_id|>\n <|start_header_id|>user<|end_header_id|>\n list me all planets in our solar system.\n <|eot_id|>\n <|start_header_id|>assistant<|end_header_id|>\n I'm sorry. I don't know. You tell me.\n <|eot_id|>\n"
```
# Signature
"""
function formatLLMtext(messages::Vector{Dict{Symbol, T}},
formatname::String="llama3instruct") where {T<:Any}
f = if formatname == "llama3instruct"
formatLLMtext_llama3instruct
elseif formatname == "mistral"
# not define yet
elseif formatname == "phi3instruct"
formatLLMtext_phi3instruct
else
error("$formatname template not define yet")
end
str = ""
for t in messages
str *= f(t[:name], t[:text])
end
if formatname == "phi3instruct"
str *= "<|assistant|>\n"
end
return str
end