This commit is contained in:
2023-11-29 04:14:16 +00:00
parent 6ae189e701
commit 6a8e24f20f
4 changed files with 175 additions and 71 deletions

View File

@@ -247,7 +247,7 @@ function chat_mistral_openorca(a::agentReflex, usermsg::String)
end
function planner_mistral_openorca(a::agentReflex, usermsg::String)
function planner_mistral_openorca(a::agentReflex)
"""
general prompt format:
@@ -283,7 +283,11 @@ function planner_mistral_openorca(a::agentReflex, usermsg::String)
{thinkingFormat}
<|im_end|>
{context}
{shorttermMemory}
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"""
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner])
@@ -311,23 +315,12 @@ function planner_mistral_openorca(a::agentReflex, usermsg::String)
prompt = replace(prompt, "{context}" => context)
# initialize short term memory
txt =
"""
<|im_start|>user
{usermsg}
<|im_end|>
<|im_start|>assistant
"""
txt = replace(txt, "{usermsg}" => "Stimulus: $usermsg")
a.memory[:shortterm] = txt
prompt = replace(prompt, "{shorttermMemory}" => a.memory[:shortterm])
prompt = replace(prompt, "{usermsg}" => "Stimulus: $(a.memory[:shortterm]["user"])")
return prompt
end
#WORKING try use Thought/Act/ActInput/Obs loop because some time step 2 depend on step 1
function actor_mistral_openorca(a::agentReflex, plan)
function actor_mistral_openorca(a::agentReflex)
"""
general prompt format:
@@ -369,7 +362,17 @@ function actor_mistral_openorca(a::agentReflex, plan)
prompt = replace(prompt, "{role}" => a.roles[a.role])
prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:actor])
prompt = replace(prompt, "{step}" => a.step)
prompt = replace(prompt, "{shorttermMemory}" => a.memory[:shortterm])
s = ""
for (k, v) in a.memory[:shortterm]
if k ["user", "Plan 1:"]
s1 = "$k $v"
s *= s1
end
end
prompt = replace(prompt, "{shorttermMemory}" => s)
toolnames = ""
toollines = ""
for (toolname, v) in a.tools
@@ -389,7 +392,7 @@ function actor_mistral_openorca(a::agentReflex, plan)
# context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "")
context = replace(context, "{longterm memory}" => "")
context = replace(context, "{plan}" => "My plan:\n$plan")
context = replace(context, "{plan}" => "Plan:\n$(a.memory[:shortterm]["Plan 1:"])")
prompt = replace(prompt, "{context}" => context)
return prompt
@@ -614,7 +617,8 @@ function work(a::agentReflex, usermsg::String)
if a.thinkingmode == :new_thinking
a.earlierConversation = conversationSummary(a)
_ = addNewMessage(a, "user", usermsg)
a.thoughtlog = "user: $usermsg\n"
a.memory[:shortterm]["user"] = usermsg
@show a.memory[:shortterm]
elseif a.thinkingmode == :continue_thinking #TODO
error("continue_thinking $(@__LINE__)")
_ = addNewMessage(a, "user", usermsg)
@@ -632,37 +636,37 @@ function work(a::agentReflex, usermsg::String)
toolname = nothing
toolinput = nothing
prompt = planner_mistral_openorca(a, usermsg)
prompt = planner_mistral_openorca(a)
@show prompt
respond = sendReceivePrompt(a, prompt)
# sometimes LLM add not-need word I don't want
plan = split(respond, "<|im_end|>")[1]
plan = split(plan, "Response:")[1]
plan = replace(plan, "Plan:"=>"Plan $(a.attempt):")
# a.memory[:shortterm] *= plan
actorstate, msgToUser = actor(a, plan)
error("333")
plan = split(plan, "Execution:")[1]
plan = split(plan, "Result:")[1]
plan = replace(plan, "Plan:"=>"")
a.memory[:shortterm]["Plan $(a.attempt):"] = plan
actorstate, msgToUser = actor(a)
if actorstate == "chatbox"
respond = msgToUser
break
elseif actorstate == "all steps done"
elseif actorstate == "all steps done" #WORKING add canceled during plan
println("all steps done")
respond = formulateRespond(a, a.memory[:shortterm])
a.memory[:shortterm] *= "Respond: $respond\n"
respond = formulateRespond(a)
a.memory[:shortterm]["Respond:"] = respond
# evaluate. if score < 8/10 try again.
headerToDetect = ["user:", "assistant:", ]
headers = detectCharacters(a.memory[:shortterm], headerToDetect)
chunkedtext = chunktext(a.memory[:shortterm], headers)
stimulus = chunkedtext["user:"]
guideline = writeEvaluationGuideline(a, stimulus)
guideline = writeEvaluationGuideline(a, a.memory[:shortterm]["user"])
@show guideline
score = grading(a, guideline, respond)
@show score
if score >= 8 # good enough answer
@show a.memory[:shortterm]
a.memory[:shortterm] = ""
a.thoughtlog = ""
a.memory[:shortterm] = OrderedDict{String, Any}()
break
else # self evaluate and reflect then try again
analysis = analyze(a, a.memory[:shortterm])
@@ -670,14 +674,13 @@ function work(a::agentReflex, usermsg::String)
lessonwithcontext = selfReflext(a, analysis)
@show lessonwithcontext
a.memory[:shortterm] = ""
a.memory[:shortterm] = OrderedDict{String, Any}()
#TODO add lesson and context into longterm memory
headerToDetect = ["Lesson:", "Context:", ]
headers = detectCharacters(lessonwithcontext, headerToDetect)
chunkedtext = chunktext(lessonwithcontext, headers)
@show chunkedtext
push!(a.memory[:longterm], Dict(:context=>chunkedtext["Context:"],
:lesson=>chunkedtext["Lesson:"]))
a.memory[:longterm][chunkedtext["Context:"] => chunkedtext["Lesson:"]]
error("22222222")
end
else
@@ -721,22 +724,23 @@ end
msgToUser = "message from assistant to user"
"""
function actor(a::agentReflex, plan)
function actor(a::agentReflex)
actorState = nothing
msgToUser = nothing
totalsteps = checkTotalStepInPlan(a, plan)
totalsteps = checkTotalStepInPlan(a)
a.step = 0
while true # Actor loop
a.step += 1
@show a.step
if a.step <= totalsteps
# WORKING in step 2, I need to use a.memory[:shortterm] as input to actor()
prompt = actor_mistral_openorca(a, plan)
prompt = actor_mistral_openorca(a)
@show prompt
respond = sendReceivePrompt(a, prompt)
# some time LLM not generate a number after headers but I want it
if occursin("Act:", respond)
headerToDetect = ["Question:", "Plan:", "Thought:",
@@ -750,8 +754,7 @@ function actor(a::agentReflex, plan)
respond = split(respond, "<|im_end|>")[1]
@show respond
# add to memory
a.memory[:shortterm] *= respond
headerToDetect = ["Question $(a.step):", "Plan $(a.step):", "Thought $(a.step):",
"Act $(a.step):", "ActInput $(a.step):", "Obs $(a.step):", "...",
@@ -759,6 +762,10 @@ function actor(a::agentReflex, plan)
headers = detectCharacters(respond, headerToDetect)
chunkedtext = chunktext(respond, headers)
@show chunkedtext
# add to memory
a.memory[:shortterm] = addShortMem!(a.memory[:shortterm], chunkedtext)
toolname = toolNameBeingCalled(chunkedtext["Act $(a.step):"], a.tools)
toolinput = chunkedtext["ActInput $(a.step):"]
@show toolname
@@ -773,11 +780,9 @@ function actor(a::agentReflex, plan)
else # function call
f = a.tools[Symbol(toolname)][:func]
result = f(a, toolinput)
_result = "\nObs $(a.step): $result\n"
a.memory[:shortterm] *= _result
a.thoughtlog *= _result
msgToUser = result
a.memory[:shortterm]["Obs $(a.step):"] = result
end
elseif #WORKING plan canceled
else #TODO finish all steps
actorState = "all steps done"
msgToUser = nothing
@@ -1010,7 +1015,6 @@ Return:
A respond for user's stimulus.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
@@ -1056,6 +1060,79 @@ function formulateRespond(a, shorttermMemory::T) where {T<:AbstractString}
return respond
end
""" Formulate a respond for the user's stimulus from the agent's short term memory.
Args:
    a, one of ChatAgent's agent; reads `a.memory[:shortterm]`, where the
        "user" key holds the original stimulus and every other entry is one
        piece of the agent's work (Plan/Thought/Act/ActInput/Obs chunks).
Return:
    The respond string produced by the LLM via `sendReceivePrompt`.
"""
function formulateUserRespond(a)
    # NOTE(review): removed the dangling `where {T<:AbstractDict}` — `T` never
    # appeared in the signature, so it only produced an unused-type-variable
    # warning at definition time.
    stimulus = a.memory[:shortterm]["user"]
    # Gather everything the agent did, skipping the stimulus entry itself.
    work = ""
    for (k, v) in a.memory[:shortterm]
        # `∉` restored here: the condition was garbled to `k ["user",]`,
        # which does not parse.
        if k ∉ ["user",]
            work *= "$k, $v\n"
        end
    end
    prompt =
        """
        <|im_start|>system
        You have access to the following tools:
        chatbox: Useful for when you need to ask a customer for more context. Input should be a conversation to customer.
        wikisearch: Useful for when you need to search an encyclopedia Input is keywords and not a question.
        Symbol:
        Stimulus: the input user gives to you and you must respond
        Plan: a plan
        Thought: your thought
        Act: the action you took
        ActInput: the input to the action
        Obs: the result of the action
        Stimulus:
        $stimulus
        Your work:
        $work
        From your work, formulate a respond for user's stimulus.
        <|im_end|>
        """
    respond = sendReceivePrompt(a, prompt)
    return respond
end
""" Determine whether there is a respond available.
Args:
a, one of ChatAgent's agent.
Return:
A respond for user's stimulus.
# Example
```jldoctest
julia> using ChatAgent, CommUtils
julia> agent = ChatAgent.agentReflex("Jene")
julia> shorttermMemory =
"
user: What's AMD latest product?
assistant: Plan 1: To provide the user with information about AMD's latest product, I will search for the most recent product release from AMD.
1. Search for \"AMD latest product\" using wikisearch tool.
2. Identify the most recent product release mentioned in the search results.
3. Provide the user with the name of the latest product.
Thought 1: The user wants to know about the latest AMD products, so I should use the wikisearch tool to find information on this topic.
Act 1: wikisearch
ActInput 1: \"AMD latest product\"
Obs 1: No info available."
julia> report = formulateRespond(agent, shorttermMemory)
```
"""
function isRespond()
end
@@ -1101,11 +1178,6 @@ end

View File

@@ -41,13 +41,13 @@ function wikisearch(a::agentReflex, phrase::T) where {T<:AbstractString}
try
result = json_data["query"]["pages"][page_id]["extract"]
catch
result = "No info available. Try different keywords"
result = "No info available for your search query."
end
if result == ""
result = "No info available. Try different keywords"
result = "No info available for your search query."
end
if result != "No info available. Try different keywords" #TODO for use with wikisearch(). Not good for other tools
if result != "No info available for your search query." #TODO for use with wikisearch(). Not good for other tools
result = makeSummary(a, result)
end
return result

View File

@@ -2,7 +2,7 @@ module type
export agent, agentReflex
using Dates, UUIDs
using Dates, UUIDs, DataStructures
using CommUtils
#------------------------------------------------------------------------------------------------100
@@ -41,8 +41,8 @@ abstract type agent end
thinkingmode::Symbol = :no_thinking
thinkingFormat::Union{Dict, Nothing} = nothing
memory::Dict = Dict(
:shortterm=> "", #WORKING change to Dict
:longterm=> Vector{Dict{Symbol, Any}}(),
:shortterm=> OrderedDict{String, Any}(),
:longterm=> OrderedDict{String, Any}(),
)
end
@@ -95,7 +95,7 @@ function agentReflex(
:planner=>
"""Use the following format:
Stimulus: the input user gives to you and you must respond
Plan: first you should always think about the stimulus and the info you have thoroughly then extract and devise a step by step plan to respond (pay attention to correct numeral calculation and commonsense).
Plan: first you should always think about the stimulus and the info you have thoroughly then extract and devise a step by step plan (pay attention to correct numeral calculation and commonsense).
""",
:actor=>
"""
@@ -109,8 +109,8 @@ function agentReflex(
tools::Dict=Dict(
:chatbox=>Dict(
:name => "chatbox",
:description => "Useful for when you need to ask a customer for more context.",
:input => "Input should be a conversation to customer.",
:description => "Useful for when you need to communicate with the user.",
:input => "Input should be a conversation to the user.",
:output => "" ,
:func => nothing,
),

View File

@@ -2,9 +2,10 @@ module utils
export makeSummary, sendReceivePrompt, chunktext, extractStepFromPlan, checkTotalStepInPlan,
detectCharacters, findDetectedCharacter, extract_number, toolNameBeingCalled,
chooseThinkingMode, conversationSummary, checkReasonableness, addStepNumber
chooseThinkingMode, conversationSummary, checkReasonableness, addStepNumber,
addShortMem!
using UUIDs, Dates
using UUIDs, Dates, DataStructures
using CommUtils, GeneralUtils
using ..type
@@ -181,13 +182,14 @@ end
(char = "user", start = 56, stop = 59)
(char = "Then", start = 102, stop = 105)
julia> chunkedtext = ChatAgent.chunktext(text, headers)
2-element Vector{Any}:
(header = "First", body = ", we need to find out what kind of wine the ")
(header = "user", body = " wants.")
OrderedDict{String, String} with 3 entries:
"Act 1:" => " wikisearch"
"ActInput 1:" => " latest AMD GPU"
"Thought 1:" => " I should always think about..."
```
"""
function chunktext(text::T, headers) where {T<:AbstractString}
result = Dict()
function chunktext(text::T1, headers::T2) where {T1<:AbstractString, T2<:AbstractVector}
result = OrderedDict{String, Any}()
for (i, v) in enumerate(headers)
if i < length(headers)
@@ -230,7 +232,9 @@ function extractStepFromPlan(a::agent, plan::T, step::Int) where {T<:AbstractStr
return respond
end
function checkTotalStepInPlan(a::agent, plan::T) where {T<:AbstractString}
function checkTotalStepInPlan(a::agent)
p = a.memory[:shortterm]["Plan 1:"]
plan = "Plan 1: $p"
prompt =
"""
<|im_start|>system
@@ -430,12 +434,12 @@ function conversationSummary(a::T) where {T<:agent}
prompt = replace(prompt, "{conversation}" => conversation)
result = sendReceivePrompt(a, prompt)
summary = result === nothing ? "nothing" : result
summary = replace(summary, "<|im_end|>" => "")
summary = split(summary, "<|im_end|>")[1]
if summary[1:1] == "\n"
summary = summary[2:end]
end
end
println("conversation summary: $summary")
@show summary
return summary
end
@@ -500,6 +504,34 @@ end
""" Add chunked text to a short term memory of a chat agent
Args:
shortMem = short memory of a chat agent,
chunkedtext = a dict contains text
Return: no return
# Example
```jldoctest
julia> chunkedtext = OrderedDict{String, String}(
"Thought 1:" => " I should always think about...",
"Act 1:" => " wikisearch",
"ActInput 1:" => " latest AMD GPU",)
julia> shortMem = OrderedDict{String, Any}()
julia> addShortMem!(shortMem, chunkedtext)
OrderedDict{String, Any} with 3 entries:
"Thought 1:" => " I should always think about..."
"Act 1:" => " wikisearch"
"ActInput 1:" => " latest AMD GPU"
```
"""
function addShortMem!(shortMem::OrderedDict{String, Any}, chunkedtext::T) where {T<:AbstractDict}
for (k, v) in chunkedtext
shortMem[k] = v
end
return shortMem
end