diff --git a/src/interface.jl b/src/interface.jl
index ba43b04..d6d72b7 100755
--- a/src/interface.jl
+++ b/src/interface.jl
@@ -123,19 +123,27 @@ function chat_mistral_openorca(a::agentReflex)
conversation = messagesToString(a.messages)
+ aboutYourself =
+ """
+ Your name is $(a.agentName)
+ $(a.roles[a.role])
+ """
+
prompt =
"""
<|system|>
- $(a.roles[a.role])
- Your earlier talk with the user:
- $(a.earlierConversation)
+
+ $aboutYourself
+
+
+ $(a.earlierConversation)
+
$conversation
<|assistant|>
"""
- response = sendReceivePrompt(a, prompt, timeout=180)
- response = split(response, "<|im_end|>")[1]
+ response = sendReceivePrompt(a, prompt, timeout=180, stopword=["<|", ""])
return response
end
@@ -180,38 +188,38 @@ function planner_mistral_openorca(a::agentReflex)
# skip objective and plan because LLM is going to generate new plan
shorttermMemory = dictToString(a.memory[:shortterm], skiplist=["Objective:", "Plan 1:"])
+
+ aboutYourself =
+ """
+ Your name is $(a.agentName)
+ $(a.roles[a.role])
+ """
assistant_plan_prompt =
"""
<|system|>
- $(a.roles[a.role])
- Required info you need for wine recommendation:
- - occasion: ask the user
- - type of food that will be served with wine: ask the user
- - ambient temperature at the serving location: ask the user
- - type of wine (Rose, White, Red and Sparkling): ask the user
- - user's personal taste of wine characteristic: ask the user
- - wine price range: ask the user
- - wines we have in stock: use winestock tool
-
- You have access to the following tools:
- $toollines
-
- Your earlier work:
- $shorttermMemory
-
- Your job is to do the following:
- Plan: first you should always think about your conversation with the user and your earlier work thoroughly then extract and devise a complete, task by task plan to achieve your objective (pay attention to correct numeral calculation and commonsense).
- P.S.1 each task of the plan should be a single action.
+
+ $aboutYourself
+
+
+ $toollines
+
+ $shorttermMemory
+
+
+ Plan: first you should always think about your conversation with the user and your earlier work thoroughly then extract and devise a complete, task by task plan to achieve your objective (pay attention to correct numeral calculation and commonsense).
+ P.S.1 each task of the plan should be a single action.
+
$conversation
<|assistant|>
Plan:
"""
- plan = sendReceivePrompt(a, assistant_plan_prompt, max_tokens=512, temperature=0.1)
- plan = split(plan, "<|")[1]
- plan = split(plan, "\n\n")[1]
+ plan = sendReceivePrompt(a, assistant_plan_prompt, max_tokens=512, temperature=0.1, stopword=["<|", ""])
+ # plan = split(plan, "<|")[1]
+ # plan = split(plan, "\n\n")[1]
return plan
end
@@ -358,10 +366,18 @@ function actor_mistral_openorca(a::agentReflex, taskrecap="")
"""
end
+ aboutYourself =
+ """
+ Your name is $(a.agentName)
+ $(a.roles[a.role])
+ """
+
prompt =
"""
<|system|>
- $(a.roles[a.role])
+
+ $aboutYourself
+
$toollines
@@ -395,7 +411,8 @@ function actor_mistral_openorca(a::agentReflex, taskrecap="")
while true # while Thought or Act is empty, run actor again
tempcounter += 0.2
@show tempcounter
- response = sendReceivePrompt(a, prompt, max_tokens=1024, temperature=tempcounter, timeout=180)
+ response = sendReceivePrompt(a, prompt, max_tokens=1024, temperature=tempcounter, timeout=180,
+ stopword=["/n/n", "END", "End", "Obs", "<|", ""])
response = splittext(response, ["/n/n", "END", "End", "Obs", "<|im_end|>"])
latestTask = shortMemLatestTask(a.memory[:shortterm]) +1
diff --git a/src/type.jl b/src/type.jl
index 2ddbbeb..163515f 100644
--- a/src/type.jl
+++ b/src/type.jl
@@ -119,31 +119,31 @@ function agentReflex(
subtopic= (imgAI="agent/api/v0.1.0/img/respond",
txtAI="agent/api/v0.1.0/txt/respond"),
keepalive= 30,),
- role::Symbol=:assistant,
- roles::Dict=Dict(
+ role::Symbol=:assistant,
+ roles::Dict=Dict(
:assistant =>
"""
- You are a helpful assistant who respond to user as best you can.
+ You are a helpful assistant.
""",
:sommelier =>
"""
- You are a helpful sommelier at an online wine reseller who help users choosing their wine from your inventory.
+            You are a helpful sommelier at a wine retailer. You help users choose their wine from your inventory.
You don't know other people personal info previously.
""",
- # :sommelier =>
- # """
- # You are a sommelier at an online wine reseller who always ask user for wine relevant info before you could help them choosing wine.
- # You provide a personalized recommendation of up to two wines based on the user's preference, and you describe the benefits of each wine in detail.
- # You don't know other people personal info previously.
-
- # Info used to select wine:
- # - type of food
- # - occasion
- # - user's personal taste of wine
- # - wine price range
- # - temperature at the serving location
- # - wine we have in stock
- # """,
+ ),
+ roleSpecificInstruction::Dict=Dict(
+ :assistant => "",
+ :sommelier =>
+ """
+ Required info you need for wine recommendation:
+ - occasion: ask the user
+ - type of food that will be served with wine: ask the user
+ - ambient temperature at the serving location: ask the user
+ - type of wine (Rose, White, Red and Sparkling): ask the user
+    - user's personal taste of wine characteristics: ask the user
+ - wine price range: ask the user
+ - wines we have in stock: use winestock tool
+ """
),
thinkingFormat::Dict=Dict(
:react=>
diff --git a/src/utils.jl b/src/utils.jl
index 404c129..dff04f2 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -26,13 +26,14 @@ using ..type
```
"""
function sendReceivePrompt(a::T, prompt::String; max_tokens=256, timeout::Int=120,
- temperature::AbstractFloat=0.2) where {T<:agent}
+ temperature::AbstractFloat=0.2, stopword=[]) where {T<:agent}
a.msgMeta[:msgId] = "$(uuid4())" # new msg id for each msg
msg = Dict(
:msgMeta=> a.msgMeta,
:txt=> prompt,
:max_tokens=> max_tokens,
:temperature=> temperature,
+ :stopword=> stopword,
)
payloadChannel = Channel(1)
@@ -343,44 +344,52 @@ function isUsePlans(a::agentReflex)
conversation = messagesToString(a.messages)
+ aboutYourself =
+ """
+ Your name is $(a.agentName)
+ $(a.roles[a.role])
+ """
+
prompt =
"""
<|system|>
- You are a helpful assistant.
- You have access to the following tools:
- $toollines
-
-
- Your job is to decide whether you need think thoroughly in order to respond to the user according to your conversation with the user and tools you have.
-
-
- user: Hello!. How are you?
- assistant: {"thought": "the user is greeting me, I don't need to think about it.", "anwer": "no"}
-
-
- user: "I want to get a bottle of wine."
- assistant: {"thought": "the user show interest to purchase wine from me.", "anwer": "yes"}
-
+
+ $aboutYourself
+
+
+ $toollines
+
+
+    Your job is to decide whether you need to think thoroughly or use tools in order to respond to the user's question.
+
+
+    user: Hello! How are you?
+    assistant: {"thought": "the user is greeting me, I don't need to think about it.", "answer": "no"}
+
+
+    user: "What's tomorrow's weather like?"
+    assistant: {"thought": "I will need to use weather tools to check for tomorrow's temperature.", "answer": "yes"}
+
$conversation
<|assistant|>
"""
-
- # if LLM mentions any tools, use Plan/Thought/Act loop
isuseplan = false
- response = sendReceivePrompt(a, prompt, temperature=0.2, max_tokens=64)
- response = split(response, "<|assistant|>")[1]
- response = split(response, "<|user|>")[1]
-
- for (toolname, v) in a.tools
- if occursin("Yes", String(response))
- isuseplan = true
- break
- end
- end
if length(a.memory[:shortterm]) != 0
isuseplan = true
+ elseif a.role == :sommelier
+ isuseplan = true
+ else
+ # if LLM mentions any tools, use Plan/Thought/Act loop
+ response = sendReceivePrompt(a, prompt, temperature=0.2, max_tokens=64, stopword=["<|", ""])
+ for (toolname, v) in a.tools
+ if occursin("Yes", String(response))
+ isuseplan = true
+ break
+ end
+ end
+
end
return isuseplan