From 6fe84e0cb181705d15d0640d378589e78fbd7e2f Mon Sep 17 00:00:00 2001 From: tonaerospace Date: Fri, 1 Dec 2023 12:18:04 +0000 Subject: [PATCH] update --- src/interface.jl | 103 +++++++++++++++++++++++++++++---------------- src/llmfunction.jl | 36 +++++++++++++++- src/type.jl | 30 +++++++------ src/utils.jl | 74 ++++++++++++++++++++++---------- 4 files changed, 170 insertions(+), 73 deletions(-) diff --git a/src/interface.jl b/src/interface.jl index ce067cb..81f1796 100755 --- a/src/interface.jl +++ b/src/interface.jl @@ -288,20 +288,20 @@ function planner_mistral_openorca(a::agentReflex) {usermsg} <|im_end|> <|im_start|>assistant - + Plan $(a.attempt): """ prompt = replace(prompt, "{role}" => a.roles[a.role]) prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner]) roleSpecificKnowledge = """ - You provide a personalized recommendation of up to two wines based on the user's preference, and you describe the benefits of each wine in detail. Info you need from the user to be able to help them selecting their best wine: - type of food - occasion - user's personal taste of wine - wine price range - temperature at the serving location - - wine we have in stock + - wines we have in stock + You job is to provide a personalized recommendation of up to two wines based on the user's info above, and you describe the benefits of each wine in detail. 
""" prompt = replace(prompt, "{roleSpecificKnowledge}" => roleSpecificKnowledge) toolnames = "" @@ -333,6 +333,7 @@ function planner_mistral_openorca(a::agentReflex) return prompt end + function actor_mistral_openorca(a::agentReflex) """ general prompt format: @@ -370,7 +371,6 @@ function actor_mistral_openorca(a::agentReflex) {context} <|im_end|> {shorttermMemory} - Thought $(a.step): """ prompt = replace(prompt, "{role}" => a.roles[a.role]) @@ -403,7 +403,7 @@ function actor_mistral_openorca(a::agentReflex) {longterm memory} {plan} """ - # context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)") + context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)") context = replace(context, "{env state}" => "") context = replace(context, "{longterm memory}" => "") context = replace(context, "{plan}" => "My plan:\n$(a.memory[:shortterm]["Plan 1:"])") @@ -633,10 +633,12 @@ function work(a::agentReflex, usermsg::String) _ = addNewMessage(a, "user", usermsg) a.memory[:shortterm]["user:"] = usermsg a.memory[:log]["user:"] = usermsg + a.newplan = true + a.attempt = 0 elseif a.thinkingmode == :continue_thinking #TODO - error("continue_thinking $(@__LINE__)") + println("continue_thinking!!") _ = addNewMessage(a, "user", usermsg) - a.memory[:shortterm]["Obs $(a.step):"] = usermsg + a.memory[:shortterm]["Obs $(a.step)-$(a.substep):"] = usermsg a.memory[:log]["Obs $(a.step):"] = usermsg else error("undefined condition thinkingmode = $thinkingmode $(@__LINE__)") @@ -651,20 +653,25 @@ function work(a::agentReflex, usermsg::String) toolname = nothing toolinput = nothing - prompt = planner_mistral_openorca(a) - @show prompt - respond = sendReceivePrompt(a, prompt) + if a.newplan == true + prompt_plan = planner_mistral_openorca(a) + println("") + @show prompt_plan + respond = sendReceivePrompt(a, prompt_plan, max_tokens=1024) - # sometimes LLM add not-need word I don't want - # 
plan = split(respond, "<|im_end|>")[1] - # plan = split(plan, "Response:")[1] - # plan = split(plan, "Execution:")[1] - # plan = split(plan, "Result:")[1] - # plan = split(plan, "Recommendation:")[1] - plan = splittext(respond, ["<|im_end|>", "Response:", "Execution:", "Result:", "Recommendation:"]) - plan = replace(plan, "Plan:"=>"") - a.memory[:shortterm]["Plan $(a.attempt):"] = plan - a.memory[:log]["Plan $(a.attempt):"] = plan + # sometimes LLM add not-need word I don't want + plan = splittext(respond, ["<|im_end|>", "Response", "Execution", + "Result", "Recommendation"]) + # plan = replace(plan, "Plan:"=>"") + println("") + @show plan + a.memory[:shortterm]["Plan $(a.attempt):"] = plan + a.memory[:log]["Plan $(a.attempt):"] = plan + a.step = 0 + a.substep = 0 + a.newplan = false + end + actorstate, msgToUser = actor(a) if actorstate == "chatbox" @@ -701,7 +708,9 @@ function work(a::agentReflex, usermsg::String) chunkedtext = chunktext(lessonwithcontext, headers) @show chunkedtext a.memory[:longterm][chunkedtext["Context:"] => chunkedtext["Lesson:"]] + a.newplan = true error("22222222") + end else error("undefied condition, actorstate $actorstate $(@__LINE__)") @@ -749,17 +758,24 @@ function actor(a::agentReflex) msgToUser = nothing totalsteps = checkTotalStepInPlan(a) - - a.step = 0 + while true # Actor loop a.step += 1 + a.substep += 1 @show a.step if a.step <= totalsteps - prompt = actor_mistral_openorca(a) - @show prompt - respond = sendReceivePrompt(a, prompt) + prompt_actor = actor_mistral_openorca(a) + println("") + @show prompt_actor + respond = sendReceivePrompt(a, prompt_actor) + respond = split(respond, "Obs")[1] + respond_actor_raw = respond + @show respond_actor_raw + ind = findfirst(":", respond)[end] + respond = respond[ind+1:end] + respond = "Thought: " * respond # some time LLM not generate a number after headers but I want it if occursin("Act:", respond) @@ -767,18 +783,27 @@ function actor(a::agentReflex) "Act:", "ActInput:", "Obs:", "...", 
"Answer:", "Conclusion:", "Summary:"] headers = detectCharacters(respond, headerToDetect) - respond = addStepNumber(respond, headers, a.step) + respond = replaceHeaders(respond, headers, a.attempt, a.step, a.substep) end - respond = split(respond, "Obs")[1] respond = split(respond, "<|im_end|>")[1] - @show respond + + respond_actor = respond + println("") + @show respond_actor - headerToDetect = ["Question $(a.step):", "Plan $(a.step):", "Thought $(a.step):", - "Act $(a.step):", "ActInput $(a.step):", "Obs $(a.step):", "...", - "Answer $(a.step):", "Conclusion $(a.step):", "Summary $(a.step):"] + headerToDetect = ["Question $(a.step):", + "Plan $(a.step):", + "Thought $(a.step)-$(a.substep):", + "Act $(a.step)-$(a.substep):", + "ActInput $(a.step)-$(a.substep):", + "Obs $(a.step)-$(a.substep):", + "Check $(a.step)-$(a.substep):", + "Answer $(a.step):", + "Conclusion $(a.step):", + "Summary $(a.step):"] headers = detectCharacters(respond, headerToDetect) chunkedtext = chunktext(respond, headers) @show chunkedtext @@ -786,8 +811,8 @@ function actor(a::agentReflex) # add to memory a.memory[:shortterm] = addShortMem!(a.memory[:shortterm], chunkedtext) - toolname = toolNameBeingCalled(chunkedtext["Act $(a.step):"], a.tools) - toolinput = chunkedtext["ActInput $(a.step):"] + toolname = toolNameBeingCalled(chunkedtext["Act $(a.step)-$(a.substep):"], a.tools) + toolinput = chunkedtext["ActInput $(a.step)-$(a.substep):"] @show toolname @show toolinput @@ -801,11 +826,13 @@ function actor(a::agentReflex) f = a.tools[Symbol(toolname)][:func] toolresult = f(a, toolinput) @show toolresult - a.memory[:shortterm]["Obs $(a.step):"] = toolresult + a.memory[:shortterm]["Obs $(a.step)-$(a.substep):"] = toolresult - go = goNogo(a) + go, reason = goNogo(a) + a.memory[:shortterm]["Check $(a.step)-$(a.substep):"] = reason if go == "No" # in case there is a cancel, go straight to evaluation - a.step = totalsteps + a.step -= 1 + error(113) end end else #TODO finish all steps @@ -1158,8 
+1185,10 @@ function goNogo(a) respond = sendReceivePrompt(a, prompt) decision = GeneralUtils.getStringBetweenCharacters(respond, "{", "}") + start = findfirst("}", respond) +1 + reason = respond[start:end] - return decision + return decision, reason end diff --git a/src/llmfunction.jl b/src/llmfunction.jl index d752195..4706f82 100644 --- a/src/llmfunction.jl +++ b/src/llmfunction.jl @@ -1,6 +1,6 @@ module llmfunction -export wikisearch +export wikisearch, winestock using HTTP, JSON3 using GeneralUtils @@ -59,7 +59,41 @@ end +function winestock(a::agentReflex, phrase::T) where {T<:AbstractString} + # result = [ + # Dict( + # "name" => "Louis Latou - Corton-Charlamagne - Chardonnay", + # "description" => "Corton-Charlemagne 2018 is a powerful, complex wine. Its nose is intense, with notes of white stone fruits such as white peach, fresh hazelnut, vanilla, and almond paste. The wine is full-bodied for the palate, and the vanilla is complemented by aromas of fresh almond and lime blossom. The experience ends with a very fine aromatic aftertaste that has subtle saline notes.", + # "price" => "49", + # "ID" => "ws-114" + # ), + # Dict( + # "name" => "Louis Latou - Corton-Charlamagne - Chardonnay", + # "description" => "Corton-Charlemagne 2018 is a powerful, complex wine. Its nose is intense, with notes of white stone fruits such as white peach, fresh hazelnut, vanilla, and almond paste. The wine is full-bodied for the palate, and the vanilla is complemented by aromas of fresh almond and lime blossom. The experience ends with a very fine aromatic aftertaste that has subtle saline notes.", + # "price" => "49", + # "ID" => "ws-114" + # ) + # ] + result = + """ + 1. Name: Louis Latou - Corton-Charlamagne - Chardonnay, + Description: Corton-Charlemagne 2018 is a powerful, complex wine. Its nose is intense, with notes of white stone fruits such as white peach, fresh hazelnut, vanilla, and almond paste. 
The wine is full-bodied for the palate, and the vanilla is complemented by aromas of fresh almond and lime blossom. The experience ends with a very fine aromatic aftertaste that has subtle saline notes., + Price: 49 dollars, + ID: ws-114 + 2. Name: Chateau de Beaucastel Hommage Jacques Perrin Chateauneuf-du-Pape, + Year: 2019, + Description: The quintessence of Château de Beaucastel, Hommage à Jacques Perrin delights us every year, and the 2019 vintage is no exception. To the eye it offers a splendid deep red color, verging on black. Full of power and supremely elegant, the nose is of magnificent aromatic complexity with notes of black fruit and spices that offer all the characteristic expression of Mourvèdre. Perfectly balanced by an incredible freshness, the mouth is eminently elegant with intense and complex aromas of great subtlety, a full, refined texture, subtle tannins of great finesse, and infinite length. A great classic Hommage à Jacques Perrin., + Price: 42, + ID: ed-23 + 3. Name: M. Chapoutier Ermitage l'Ermite Blanc, + Year: 2017 + Description: Brilliant pale yellow. Complex aromas of vanilla, almonds, dried fruits and linden-tree. The mineraliaty is marked (typical of soil). Very round and rich wine. An elegant balance, of very ripe white fruit aromas (peach and apricot) and light notes of minerality. 
Beautiful length and complexity., + Price: 13, + ID: wwr-259 + """ + return result +end diff --git a/src/type.jl b/src/type.jl index f135c7f..835b026 100644 --- a/src/type.jl +++ b/src/type.jl @@ -34,9 +34,11 @@ abstract type agent end # messages= [Dict(:role=>"system", :content=> "", :timestamp=> Dates.now()),] messages = Vector{Dict{Symbol, Any}}() tools::Union{Dict, Nothing} = nothing + newplan::Bool = false + attemptlimit::Int = 5 # thinking round limit attempt::Int = 0 # attempted number step::Int = 0 # step number - attemptlimit::Int = 5 # thinking round limit + substep::Int = 0 # substep number thinkingmode::Symbol = :no_thinking thinkingFormat::Union{Dict, Nothing} = nothing memory::Dict = Dict( @@ -65,7 +67,7 @@ function agentReflex( """, :sommelier => """ - You are a sommelier at an online wine reseller who always help users choosing their wine. + You are a sommelier at an online wine reseller who always helps users choose their wine from your inventory. You don't know other people's personal info beforehand. """, # :sommelier => @@ -85,7 +87,8 @@ function agentReflex( ), thinkingFormat::Dict=Dict( :react=> - """Use the following format: + """ + Use the following format: Question: the input question your user is asking and you must answer Plan: first you should always think about the question and the info you have thoroughly then extract and devise a complete plan to find the answer (pay attention to variables and their corresponding numerals). Thought: ask yourself do you have all the info you need? And what to do according to the plan (pay attention to correct numeral calculation and commonsense). 
@@ -98,18 +101,21 @@ function agentReflex( Begin!""", :planner=> - """Use the following format: + """ + Use the following format: Stimulus: the input user gives to you and you must respond - Plan: first you should always think about the stimulus, the info you need and the info you have thoroughly then extract and devise a small step by step plan (pay attention to correct numeral calculation and commonsense). + Plan: first you should always think about the stimulus, the info you need and the info you have thoroughly then extract and devise a step by step plan (pay attention to correct numeral calculation and commonsense). + p.s.1 each step should be a single question/action. + p.s.2 Do not respond yet. """, :actor=> - """ - Use the following format: - Thought: think about how to do step {step} of the plan? (pay attention to correct numeral calculation and commonsense). - Act: the action to take that match your thought, should be one of [{toolnames}] - ActInput: the input to the action (pay attention to the tool's input) - Obs: the result of the action - """, + """ + Use the following format: + Thought: think about what to do according to step {step} of the plan? (pay attention to correct numeral calculation and commonsense). 
+ Act: the action to take that matches your thought, should be one of [{toolnames}] + ActInput: the input to the action (pay attention to the tool's input) + Obs: the result of the action + """, ), tools::Dict=Dict( :chatbox=>Dict( diff --git a/src/utils.jl b/src/utils.jl index 3be40fe..a1c2ad0 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -2,7 +2,7 @@ module utils export makeSummary, sendReceivePrompt, chunktext, extractStepFromPlan, checkTotalStepInPlan, detectCharacters, findDetectedCharacter, extract_number, toolNameBeingCalled, - chooseThinkingMode, conversationSummary, checkReasonableness, addStepNumber, + chooseThinkingMode, conversationSummary, checkReasonableness, replaceHeaders, addShortMem!, splittext using UUIDs, Dates, DataStructures @@ -344,6 +344,7 @@ function chooseThinkingMode(a::agentReflex, usermsg::String) Choose one of the following choices: If you don't need to use tools or actions to respond to the stimulus say, "{no}". If you need tools or actions to respond to the stimulus say, "{yes}". + If the user wants to get wine say, "{yes}". 
<|im_end|> <|im_start|>user @@ -485,21 +486,6 @@ end -""" - Add step number to header in a text -""" -function addStepNumber(text::T, headers, step::Int) where {T<:AbstractString} - newtext = text - for i in headers - if occursin(i[:char], newtext) - new = replace(i[:char], ":"=> " $step:") - newtext = replace(newtext, i[:char]=>new ) - end - end - return newtext -end - - """ Add chunked text to a short term memory of a chat agent @@ -559,20 +545,62 @@ end +""" + Add step number to header in a text +""" +function addStepNumber(text::T, headers, step::Int) where {T<:AbstractString} + newtext = text + for i in headers + if occursin(i[:char], newtext) + new = replace(i[:char], ":"=> " $step:") + newtext = replace(newtext, i[:char]=>new ) + end + end + return newtext +end +function addStepNumber(text::T, headers, step::Int, substep::Int) where {T<:AbstractString} + newtext = text + for i in headers + if occursin(i[:char], newtext) + new = replace(i[:char], ":"=> " $step-$substep:") + newtext = replace(newtext, i[:char]=>new ) + end + end + return newtext +end +""" Replace each detected header in a text with a numbered header +Args: + text = a text whose headers you want to renumber + headers = a list of detected header entries you want to renumber +Return: + the text with each header rewritten as "<header> attempt-step-substep:" +# Example +```jldoctest
julia> text = "Consider the type of food, occasion and temperature at the serving location." +julia> headers = ["Thought", "Act"] +``` +""" +function replaceHeaders(text::T, headers, attempt::Int, step::Int, substep::Int) where {T<:AbstractString} + newtext = text + for i in headers + header = i[:char][1:end-1] + if occursin(header, newtext) + startind = findfirst(i[:char], newtext)[1] + stopind = findnext(":", newtext, startind+1)[end] + word = newtext[startind: stopind] + newtext = replace(newtext, word=> "$header $attempt-$step-$substep:") + + end + end - - - - - - - + return newtext +end