This commit is contained in:
2023-12-07 08:54:02 +00:00
parent 7482f1e933
commit c4d090ee72
2 changed files with 93 additions and 24 deletions

View File

@@ -314,15 +314,16 @@ function planner_mistral_openorca(a::agentReflex)
prompt = replace(prompt, "{toolnames}" => toolnames)
prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
# prepare contex
context =
"""
{earlierConversation}
{env state}
{longterm memory}
"""
context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
context = replace(context, "{env state}" => "")
context = replace(context, "{longterm memory}" => "")
"""
My earlier talk with the user:
$(a.earlierConversation)
My earlier experience
$(experience(a.memory[:longterm]))
"""
prompt = replace(prompt, "{context}" => context)
@@ -379,7 +380,7 @@ function actor_mistral_openorca(a::agentReflex)
prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:actor])
prompt = replace(prompt, "{step}" => a.step)
s = shortMemoryToString(a.memory[:shortterm], ["user:", "Plan 1:"])
s = dictToString(a.memory[:shortterm], ["user:", "Plan 1:"])
prompt = replace(prompt, "{shorttermMemory}" => s)
toolnames = ""
@@ -457,7 +458,7 @@ function conversation(a::T, usermsg::String) where {T<:agent}
thinkingmode = chooseThinkingMode(a, usermsg)
@show thinkingmode
if thinkingmode == :no_thinking
a.context = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
a.context = conversationSummary(a)
_ = addNewMessage(a, "user", usermsg)
prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
@show prompt
@@ -600,12 +601,13 @@ function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)
a.attemptlimit = attemptlimit
respond = nothing
a.earlierConversation = conversationSummary(a)
# determine thinking mode
a.thinkingmode = chooseThinkingMode(a, usermsg)
@show a.thinkingmode
if a.thinkingmode == :no_thinking
a.earlierConversation = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
_ = addNewMessage(a, "user", usermsg)
prompt = chat_mistral_openorca(a, usermsg)
println("")
@@ -628,12 +630,11 @@ function work(a::agentReflex, usermsg::String)
respond = nothing
if a.thinkingmode == :new_thinking
a.earlierConversation = conversationSummary(a)
_ = addNewMessage(a, "user", usermsg)
a.memory[:shortterm]["user:"] = usermsg
a.memory[:log]["user:"] = usermsg
a.newplan = true
elseif a.thinkingmode == :continue_thinking #TODO
elseif a.thinkingmode == :continue_thinking
println("continue_thinking!!")
_ = addNewMessage(a, "user", usermsg)
a.memory[:shortterm]["Obs $(a.step):"] = usermsg
@@ -710,7 +711,6 @@ function work(a::agentReflex, usermsg::String)
a.memory[:shortterm] = keepOnlyKeys(a.memory[:shortterm], ["user:"])
#TODO add lesson and context into longterm memory
headerToDetect = ["Lesson:", "Context:", ]
headers = detectCharacters(lessonwithcontext, headerToDetect)
chunkedtext = chunktext(lessonwithcontext, headers)
@@ -723,7 +723,7 @@ function work(a::agentReflex, usermsg::String)
error("undefied condition, actorstate $actorstate $(@__LINE__)")
break
end
else #TODO attempt limit reached, force AI to answer
else
error("attempt limit reach")
break
end
@@ -854,7 +854,7 @@ function actor(a::agentReflex)
a.memory[:shortterm]["Obs $(a.step):"] = toolresult
a.memory[:log]["Obs $(a.step):"] = toolresult
end
else #TODO finish all steps
else
actorState = "all steps done"
msgToUser = nothing
break
@@ -999,9 +999,9 @@ julia> shorttermMemory = OrderedDict{String, Any}(
"Obs 2:" => "No info available for your search query.")
julia> report = analyze(agent, shorttermMemory)
```
""" #WORKING analyze sometime result in empty string ""
"""
function analyze(a)
shorttermMemory = shortMemoryToString(a.memory[:shortterm], ["user:"])
shorttermMemory = dictToString(a.memory[:shortterm], ["user:"])
prompt =
"""
<|im_start|>system
@@ -1110,7 +1110,7 @@ julia> report = formulateUserRespond(agent, shorttermMemory)
function formulateUserRespond(a)
stimulus = a.memory[:shortterm]["user:"]
work = shortMemoryToString(a.memory[:shortterm], ["user:"])
work = dictToString(a.memory[:shortterm], ["user:"])
prompt =
"""
@@ -1164,10 +1164,10 @@ julia> shorttermMemory = OrderedDict{String, Any}(
julia> decision = goNogo(agent)
"Yes"
```
""" #BUG sometime AI ready to formulate respond before all step are completed
"""
function goNogo(a)
stimulus = a.memory[:shortterm]["user:"]
work = shortMemoryToString(a.memory[:shortterm], ["user:"])
work = dictToString(a.memory[:shortterm], ["user:"])
# prompt =
# """

View File

@@ -3,7 +3,7 @@ module utils
export makeSummary, sendReceivePrompt, chunktext, extractStepFromPlan, checkTotalStepInPlan,
detectCharacters, findDetectedCharacter, extract_number, toolNameBeingCalled,
chooseThinkingMode, conversationSummary, checkReasonableness, replaceHeaders,
addShortMem!, splittext, shortMemoryToString, removeHeaders, keepOnlyKeys
addShortMem!, splittext, dictToString, removeHeaders, keepOnlyKeys, experience
using UUIDs, Dates, DataStructures
using CommUtils, GeneralUtils
@@ -431,7 +431,7 @@ function conversationSummary(a::T) where {T<:agent}
conversation = ""
summary = "nothing"
if length(a.messages)!= 0
for msg in a.messages
for msg in a.messages[1:end-1]
role = msg[:role]
content = msg[:content]
@@ -638,7 +638,7 @@ julia> shortMemory = OrderedDict(
)
julia> headers = ["user:"]
julia> shortMemoryToString(shortMemory, headers)
julia> dictToString(shortMemory, headers)
"Thought 1: I like it.\nAct 1: chatbox\nActinput 1: I get this one.\n"
```
"""
@@ -660,6 +660,24 @@ function shortMemoryToString(shortMemory::OrderedDict,
end
""" Convert a dictionary into one string for an LLM prompt.

Args:
    dict = any AbstractDict; each retained entry is rendered as "key value"
    skiplist = keys to leave out of the result (e.g. ["user:"])
Return:
    One string with each retained entry terminated by a newline.
"""
function dictToString(dict::T,
        skiplist::Union{Array{String}, Array{Symbol}}) where {T<:AbstractDict}
    s = ""
    for (k, v) in dict
        if k ∉ skiplist
            s *= "$k $v"
            # ensure a newline separates each entry; s[end] is a Char, so it
            # must be compared against the Char '\n' — comparing to the String
            # "\n" is always true and would append duplicate newlines
            if s[end] != '\n'
                s *= "\n"
            end
        end
    end
    return s
end
""" Remove headers of specific step from memory.
Args:
@@ -764,6 +782,57 @@ function keepOnlyKeys(dict::T1, keys::T2) where {T1<:AbstractDict, T2<:AbstractV
return newdict
end
""" Convert experience dict into 1 string for LLM to use.

Args:
    dict = a dictionary containing past experience; the keys hold context
        labels and the values hold the lesson texts
Return:
    All experience values concatenated into one string, without the context
    keys.

# Example
```jldoctest
julia> dict = OrderedDict{String, Any}(
           " This lesson can be applied to various situations" => " Gathering accurate and relevant information about the user's preferences, budget, and event details is crucial for providing personalized recommendations.\n"
       )
julia> experience(dict)
```
"""
function experience(dict::T) where {T<:AbstractDict}
    # only the values (lesson texts) are kept; keys are context labels
    return join(values(dict))
end