module interface

export agentReact, agentReflex, addNewMessage, clearMessage, removeLatestMsg,
    generatePrompt_tokenPrefix, generatePrompt_tokenSuffix, conversation, work,
    detectCharacters, chunktext, findDetectedCharacter, wikisearch, sendReceivePrompt

using JSON3, DataStructures, Dates, UUIDs, HTTP
using CommUtils, GeneralUtils
using ..type

# ---------------------------------------------------------------------------- #
# PythonCall setting
# ---------------------------------------------------------------------------- #
# Ref: https://github.com/JuliaPy/PythonCall.jl/issues/252
# By setting the following variables, PythonCall will use the system or conda
# python and the packages installed there.  If these settings are left unset
# (commented out), PythonCall uses its own python and the packages installed by
# CondaPkg (from env_preparation.jl).
# ENV["JULIA_CONDAPKG_BACKEND"] = "Null"
# systemPython = split(read(`which python`, String), "\n")[1]
# ENV["JULIA_PYTHONCALL_EXE"] = systemPython # find python location with `which python`,
#                                            # ex. raw"/root/conda/bin/python"
# using PythonCall
# const py_agents = PythonCall.pynew()
# const py_llms = PythonCall.pynew()
# function __init__()
#     # equivalent to `from langchain import agents, llms` in python
#     PythonCall.pycopy!(py_agents, pyimport("langchain.agents"))
#     PythonCall.pycopy!(py_llms, pyimport("langchain.llms"))
# end
#------------------------------------------------------------------------------------------------100

"""
    addNewMessage(a::agent, role::String, content::AbstractString) -> Int

Append a message `Dict(:role, :content, :timestamp)` to `a.messages` and return
the number of user messages the agent can still accept.  When the stored user
messages already exceed `a.maxUserMsg`, the conversation is cleared (the system
instruction survives) instead of appending, and `a.maxUserMsg` is returned.

# Example
```jldoctest
julia> addNewMessage(agent1, "user", "Where should I go to buy snacks")
```
"""
function addNewMessage(a::T1, role::String, content::T2) where {T1<:agent, T2<:AbstractString}
    if role ∉ a.availableRole # guard against typo
        error("role is not in agent.availableRole")
    end
    # check whether user messages exceed the limit
    userMsg = count(m -> m[:role] == "user", a.messages)
    messageleft = 0
    if userMsg > a.maxUserMsg
        # delete all conversation; the system instruction is kept by clearMessage
        clearMessage(a)
        messageleft = a.maxUserMsg
    else
        userMsg += 1
        d = Dict(:role=> role, :content=> content, :timestamp=> Dates.now())
        push!(a.messages, d)
        messageleft = a.maxUserMsg - userMsg
    end
    return messageleft
end

"""
    clearMessage(a::agent)

Delete every stored message except the first (the system instruction) and reset
the agent's running thought.
"""
function clearMessage(a::T) where {T<:agent}
    # pop until only the system instruction (index 1) remains
    while length(a.messages) > 1
        pop!(a.messages)
    end
    a.thought = "nothing"
end

"""
    removeLatestMsg(a::agent)

Remove the most recent message; the system instruction (index 1) is never removed.
"""
function removeLatestMsg(a::T) where {T<:agent}
    if length(a.messages) > 1
        pop!(a.messages)
    end
end

"""
    generatePrompt_mistral_openorca(a::agent, usermsg::String, thinkingMode::Symbol=:nothinking)

Build a mistral-openorca (`<|im_start|>` / `<|im_end|>`) prompt from the agent's
role, its tool descriptions, the requested thinking-mode instructions
(`a.thinkingMode[thinkingMode]`) and the current context, then append the user
assignment and an open assistant turn.
"""
function generatePrompt_mistral_openorca(a::T, usermsg::String,
        thinkingMode::Symbol=:nothinking) where {T<:agent}
    prompt =
        """
        <|im_start|>system
        {systemMsg}
        {tools}
        {thinkingMode}
        <|im_end|>
        Here are the context for the assignment:
        {context}
        """
    prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
    prompt = replace(prompt, "{thinkingMode}" => a.thinkingMode[thinkingMode])
    toollines = ""
    for (toolname, v) in a.tools
        toollines *= "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
    end
    # fix: {tools} was built but never substituted, leaking the literal
    # placeholder into the prompt (the older commented-out versions had this line)
    prompt = replace(prompt, "{tools}" => toollines)
    prompt = replace(prompt, "{context}" => a.context)
    prompt *= "<|im_start|>user\nAssignment: " * usermsg * "\n<|im_end|>\n"
    prompt *= "<|im_start|>assistant\n"
    return prompt
end

"""
    chat_mistral_openorca(a::agentReflex, usermsg::String)

Build a plain chat prompt (no tools, no thinking format) for an `agentReflex`.

General prompt format:

    <|im_start|>system
    {role}
    {tools}
    {thinkingFormat}
    <|im_end|>
    {context}
    <|im_start|>user
    {usermsg}
    <|im_end|>
    <|im_start|>assistant

where `{context}` = `{earlierConversation} {env status} {shortterm memory} {longterm memory}`.
"""
function chat_mistral_openorca(a::agentReflex, usermsg::String)
    prompt =
        """
        <|im_start|>system
        {role}
        {tools}
        {thinkingFormat}
        <|im_end|>
        {context}
        <|im_start|>user
        {usermsg}
        <|im_end|>
        <|im_start|>assistant
        """
    prompt = replace(prompt, "{role}" => a.roles[a.role])
    # fix: {tools} was never substituted and leaked into the prompt; chat mode uses no tools
    prompt = replace(prompt, "{tools}" => "")
    prompt = replace(prompt, "{thinkingFormat}" => "")
    context =
        """
        {earlierConversation}
        {current status}
        {longterm memory}
        """
    context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
    context = replace(context, "{current status}" => "")  # not populated yet
    context = replace(context, "{longterm memory}" => "") # not populated yet
    prompt = replace(prompt, "{context}" => context)
    prompt = replace(prompt, "{usermsg}" => "Assignment: $usermsg")
    return prompt
end

"""
    planner_mistral_openorca(a::agentReflex, usermsg::String)

Build a planning prompt for an `agentReflex`: same general layout as
[`chat_mistral_openorca`](@ref) but with the tool list and the planner thinking
format (`a.thinkingFormat[:planner]`) filled in.
"""
function planner_mistral_openorca(a::agentReflex, usermsg::String)
    prompt =
        """
        <|im_start|>system
        {role}
        {tools}
        {thinkingFormat}
        <|im_end|>
        {context}
        <|im_start|>user
        {usermsg}
        <|im_end|>
        <|im_start|>assistant
        """
    prompt = replace(prompt, "{role}" => a.roles[a.role])
    prompt = replace(prompt, "{thinkingFormat}" => a.thinkingFormat[:planner])
    toollines = ""
    for (toolname, v) in a.tools
        toollines *= "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
    end
    prompt = replace(prompt, "{tools}" => "You have access to the following tools:\n$toollines")
    context =
        """
        {earlierConversation}
        {current status}
        {longterm memory}
        """
    context = replace(context, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)")
    context = replace(context, "{current status}" => "")  # not populated yet
    context = replace(context, "{longterm memory}" => "") # not populated yet
    prompt = replace(prompt, "{context}" => context)
    prompt = replace(prompt, "{usermsg}" => "Assignment: $usermsg")
    return prompt
end

"""
    conversation(a::agent, usermsg::String)

Chat with the llm.  Continues an in-progress thought when `a.thought` is set,
otherwise picks a thinking mode and either answers directly (`:no_thinking`) or
enters the [`work`](@ref) think/act/observe loop (`:thinking`).

```jldoctest
julia> using JSON3, UUIDs, Dates, FileIO, CommUtils, ChatAgent

julia> mqttClientSpec = (
           clientName= "someclient",           # name of this client
           clientID= "\$(uuid4())",
           broker= "mqtt.yiem.ai",
           pubtopic= (imgAI="img/api/v0.0.1/gpu/request", txtAI="txt/api/v0.1.0/gpu/request"),
           subtopic= (imgAI="agent/api/v0.1.0/img/respond", txtAI="agent/api/v0.1.0/txt/respond"),
           keepalive= 30,
       )

julia> msgMeta = Dict(
           :msgPurpose=> "updateStatus",
           :from=> "agent",
           :to=> "llmAI",
           :requestrespond=> "request",
           :sendto=> "",                               # destination topic
           :replyTo=> "agent/api/v0.1.0/txt/respond",  # requester asks responder to reply to this topic
           :repondToMsgId=> "",                        # responder is responding to this msg id
           :taskstatus=> "",                           # "complete", "fail", "waiting" or other status
           :timestamp=> Dates.now(),
           :msgId=> "\$(uuid4())",
       )

julia> newAgent = ChatAgent.agentReact("Jene", mqttClientSpec, role=:assistant_react, msgMeta=msgMeta)

julia> respond = ChatAgent.conversation(newAgent, "Hi! how are you?")
```
"""
function conversation(a::T, usermsg::String) where {T<:agent}
    respond = nothing
    if a.thought != "nothing" # continue thought
        _ = addNewMessage(a, "user", usermsg)
        a.thought *= "Obs $(a.thinkinground): $usermsg\n"
        prompt = a.thought
        respond = work(a, prompt)
    else # new thought
        thinkingmode = chooseThinkingMode(a, usermsg)
        @show thinkingmode
        if thinkingmode == :no_thinking
            a.context = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
            _ = addNewMessage(a, "user", usermsg)
            prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
            @show prompt
            respond = sendReceivePrompt(a, prompt)
            respond = split(respond, "<|im_end|>")[1]
            respond = replace(respond, "\n" => "")
            _ = addNewMessage(a, "assistant", respond)
            @show respond
        elseif thinkingmode == :thinking
            a.context = conversationSummary(a)
            _ = addNewMessage(a, "user", usermsg)
            prompt = generatePrompt_mistral_openorca(a, usermsg, thinkingmode)
            respond = work(a, prompt)
        else
            error("undefined condition thinkingmode = $thinkingmode")
        end
    end
    return respond
end

"""
    work(a::agent, prompt::String, maxround::Int=3)

Continuously run llm rounds until the llm produces an `Answer:` or calls the
`chatbox` tool.  Each round the llm respond is split into headers
(`Thought:`, `Act:`, `ActInput:`, …), the named tool is invoked, and the
observation is appended to `a.thought` for the next round.  Once
`a.thinkingroundlimit` is exceeded the llm is nudged to wrap up.
NOTE(review): `maxround` is currently unused; the limit comes from
`a.thinkingroundlimit` — confirm before removing the parameter.
"""
function work(a::T, prompt::String, maxround::Int=3) where {T<:agent}
    respond = nothing
    while true
        a.thinkinground += 1
        @show a.thinkinground
        toolname = nothing
        toolinput = nothing
        if a.thinkinground > a.thinkingroundlimit
            # round limit reached: push the llm to conclude
            a.thought *= "Thought $(a.thinkinground): I think I know the answer."
            prompt = a.thought
        end
        @show prompt
        respond = sendReceivePrompt(a, prompt)
        headerToDetect = nothing
        if a.thinkinground == 1
            # round 1 uses un-numbered headers
            try
                respond = split(respond, "Obs:")[1] # drop hallucinated observations
                headerToDetect = ["Question:", "Plan:", "Thought:", "Act:", "ActInput:",
                    "Obs:", "...", "Answer:", "Conclusion:", "Summary:"]
            catch
            end
        else
            # later rounds use headers numbered with the current round
            try
                respond = split(respond, "Obs $(a.thinkinground):")[1]
                headerToDetect = ["Question $(a.thinkinground):", "Plan $(a.thinkinground):",
                    "Thought $(a.thinkinground):", "Act $(a.thinkinground):",
                    "ActInput $(a.thinkinground):", "Obs $(a.thinkinground):", "...",
                    "Answer:", "Conclusion:", "Summary:"]
            catch
            end
        end
        @show respond
        headers = detectCharacters(respond, headerToDetect)
        chunkedtext = chunktext(respond, headers)
        Answer = findDetectedCharacter(headers, "Answer:")
        AnswerInd = length(Answer) != 0 ? Answer[1] : nothing
        Act = findDetectedCharacter(headers, "Act $(a.thinkinground):")
        if length(Answer) == 1 && length(Act) == 0
            # assignment finished: reset thinking state and return the answer
            a.thought = "nothing"
            a.context = "nothing"
            a.thinkinground = 0
            respond = chunkedtext[AnswerInd][:body]
            respond = replace(respond, "<|im_end|>"=>"")
            _ = addNewMessage(a, "assistant", respond)
            break
        else
            # check for tool being called
            ActHeader = a.thinkinground == 1 ? "Act:" : "Act $(a.thinkinground):"
            if length(findDetectedCharacter(headers, ActHeader)) != 0 # is there an Act: in the respond?
                ActInd = findDetectedCharacter(headers, ActHeader)[1]
                toolname = toolNameBeingCalled(chunkedtext[ActInd][:body], a.tools)
            end
            ActInputHeader = a.thinkinground == 1 ? "ActInput:" : "ActInput $(a.thinkinground):"
            if length(findDetectedCharacter(headers, ActInputHeader)) != 0 # is there an ActInput:?
                ActInputInd = findDetectedCharacter(headers, ActInputHeader)[1]
                toolinput = chunkedtext[ActInputInd][:body]
            end
            # clean up the tool input
            # fix: guard against `toolinput === nothing` (no ActInput header found);
            # the original called occursin on it before the nothing-check below
            if toolinput !== nothing
                if occursin(" \"", toolinput)
                    toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " \"", "\"\n")
                else
                    toolinput = GeneralUtils.getStringBetweenCharacters(toolinput, " ", "\n")
                end
            end
            @show toolname
            @show toolinput
            if toolname === nothing || toolinput === nothing
                println("toolname $toolname toolinput $toolinput retry thinking")
                a.thinkinground -= 1 # redo this round
                continue
            end
            if a.thought == "nothing"
                # first thought of this assignment: number the headers so the llm
                # is not confused in later rounds, then seed a.thought
                thought = ""
                for i in chunkedtext
                    header = replace(i[:header], ":"=>" $(a.thinkinground):")
                    thought *= "$header $(i[:body])"
                end
                a.thought = prompt * thought
            else
                a.thought *= respond
            end
            if toolname == "chatbox" # chat with user
                a.thought *= toolinput
                respond = toolinput
                _ = addNewMessage(a, "assistant", respond)
                break
            else # function call
                f = a.tools[Symbol(toolname)][:func]
                _result = f(toolinput)
                if _result != "No info available." #TODO tuned for wikisearch(); not good for other tools
                    _result = makeSummary(a, _result)
                end
                result = "Obs $(a.thinkinground): $_result\n"
                a.thought *= result
                prompt = a.thought
            end
        end
    end
    @show respond
    return respond
end

"""
    conversation(a::agentReflex, usermsg::String; thinkingroundlimit::Int=3)

Reflex-agent variant of [`conversation`](@ref): answers directly in
`:no_thinking` mode, otherwise delegates to the planner-based `work`.
"""
function conversation(a::agentReflex, usermsg::String; thinkingroundlimit::Int=3)
    a.thinkingroundlimit = thinkingroundlimit
    respond = nothing
    # determine thinking mode
    a.thinkingmode = chooseThinkingMode(a, usermsg)
    @show a.thinkingmode
    if a.thinkingmode == :no_thinking
        a.earlierConversation = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details
        _ = addNewMessage(a, "user", usermsg)
        prompt = chat_mistral_openorca(a, usermsg) #TODO rewrite this function
        @show prompt
        respond = sendReceivePrompt(a, prompt)
        respond = split(respond, "<|im_end|>")[1]
        respond = replace(respond, "\n" => "")
        _ = addNewMessage(a, "assistant", respond)
        @show respond
    else
        respond = work(a, usermsg)
    end
    return respond
end

#WORKING — unfinished: the plan/step evaluation loop below never terminates
# normally (it errors with "work done" once a step is found and the step
# evaluation is still a stub).
function work(a::agentReflex, usermsg::String)
    if a.thinkingmode == :new_thinking
        a.earlierConversation = conversationSummary(a)
        _ = addNewMessage(a, "user", usermsg)
    elseif a.thinkingmode == :continue_thinking
        _ = addNewMessage(a, "user", usermsg)
        a.thought *= "Obs $(a.attempt): $usermsg\n"
    else
        # fix: original interpolated the undefined local `thinkingmode`
        error("undefined condition thinkingmode = $(a.thinkingmode)")
    end
    while true # plan
        a.attempt += 1
        @show a.attempt
        toolname = nothing
        toolinput = nothing
        prompt = planner_mistral_openorca(a, usermsg)
        @show prompt
        respond = sendReceivePrompt(a, prompt)
        plan = split(respond, "<|im_end|>")[1]
        @show respond
        step = 0
        while true
            step += 1
            isstep, stepdetail = extractStepFromPlan(a, plan, step)
            @show isstep
            @show stepdetail
            if isstep
                error("work done")
            end
            # evaluate
        end
    end
end

"""
    conversationSummary(a::agent) -> String

Ask the llm for a detailed bullet summary of the agent's stored conversation,
written from the agent's perspective.  Returns `"nothing"` when there are no
messages or when the llm gives no respond.

```jldoctest
julia> conversation = [
           Dict(:role=> "user", :content=> "I would like to get a bottle of wine", :timestamp=> Dates.now()),
           Dict(:role=> "assistant", :content=> "What kind of Thai dishes are you having?", :timestamp=> Dates.now()),
           Dict(:role=> "user", :content=> "It a pad thai.", :timestamp=> Dates.now()),
       ]

julia> summary = conversationSummary(conversation)
```
"""
function conversationSummary(a::T) where {T<:agent}
    prompt =
        """
        <|im_start|>system
        You talked with a user earlier. Now you make a detailed bullet summary of the conversation from your perspective.
        Here are the conversation:
        {conversation}
        <|im_end|>
        """
    conversation = ""
    summary = "nothing"
    if length(a.messages) != 0
        for msg in a.messages
            role = msg[:role]
            content = msg[:content]
            if role == "user"
                conversation *= "$role: $content\n"
            elseif role == "assistant"
                conversation *= "I: $content\n" # first person so the summary is from the agent's perspective
            else
                error("undefined condition role = $role") # fix: typo "undefied"
            end
        end
        prompt = replace(prompt, "{conversation}" => conversation)
        result = sendReceivePrompt(a, prompt)
        summary = result === nothing ? "nothing" : result
        summary = replace(summary, "<|im_end|>" => "")
        # fix: startswith is safe on an empty string, summary[1:1] was not
        if startswith(summary, "\n")
            summary = summary[2:end]
        end
    end
    return summary
end

"""
    makeSummary(a::agent, input::AbstractString) -> String

Two-step summarisation: first ask the llm whether `input` can be summarised at
all; only when it answers `{yes}` ask for the concise summary.  Returns
`"Nothing."` when the text cannot be summarised.
"""
function makeSummary(a::T1, input::T2) where {T1<:agent, T2<:AbstractString}
    summary = "Nothing."
    prompt =
        """
        <|im_start|>system
        Your need to determine now whether you can make a summary of user's text. You have the following choices:
        If you cannot make a summary say, "{no}".
        If you can make a summary say, "{yes}".
        <|im_end|>
        <|im_start|>user
        {input}
        <|im_end|>
        <|im_start|>assistant
        """
    # fix: original substituted the undefined variable `usermsg` instead of `input`
    prompt = replace(prompt, "{input}" => input)
    result = sendReceivePrompt(a, prompt)
    result = GeneralUtils.getStringBetweenCharacters(result, "{", "}")
    if result == "yes"
        prompt =
            """
            <|im_start|>system
            You are a helpful assistant. Your job is to make a concise summary of user's text.
            <|im_end|>
            <|im_start|>user
            {input}
            <|im_end|>
            <|im_start|>assistant
            """
        prompt = replace(prompt, "{input}" => input)
        result = sendReceivePrompt(a, prompt)
        summary = replace(result, "<|im_end|>" => "")
        if startswith(summary, "\n") # fix: safe on empty strings
            summary = summary[2:end]
        end
    end
    return summary
end

"""
    chooseThinkingMode(a::agent, usermsg::String) -> Symbol

Return `:continue_thinking` when a thought is in progress, otherwise ask the
llm whether tools are needed: `:thinking` when yes, `:no_thinking` when no.
"""
function chooseThinkingMode(a::T, usermsg::String) where {T<:agent}
    thinkingMode = nothing
    if a.thought != "nothing"
        thinkingMode = :continue_thinking
    else
        prompt =
            """
            <|im_start|>system
            {systemMsg}
            You have access to the following tools:
            {tools}
            You need to determine now whether you will use tools or actions to answer the assignment. You have the following choices:
            If you don't need tools or actions to finish the assignment say, "{no}".
            If you need tools or actions to finish the assignment say, "{yes}".
            <|im_end|>
            <|im_start|>user
            {input}
            <|im_end|>
            <|im_start|>assistant
            """
        toollines = ""
        for (toolname, v) in a.tools
            # fix: tool keys are Symbols; the original compared a Symbol against
            # String literals, so chatbox/nothing were never actually excluded
            if String(toolname) ∉ ("chatbox", "nothing")
                toollines *= "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
            end
        end
        prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
        prompt = replace(prompt, "{tools}" => toollines)
        prompt = replace(prompt, "{input}" => usermsg)
        result = sendReceivePrompt(a, prompt)
        willusetools = GeneralUtils.getStringBetweenCharacters(result, "{", "}")
        thinkingMode = willusetools == "yes" ? :thinking : :no_thinking
    end
    return thinkingMode
end

"""
    chooseThinkingMode(a::agentReflex, usermsg::String) -> Symbol

Reflex-agent variant: `:continue_thinking` when `a.thoughtlog` is in progress,
otherwise `:new_thinking` (tools needed) or `:no_thinking`.
"""
function chooseThinkingMode(a::agentReflex, usermsg::String)
    thinkingmode = nothing
    if a.thoughtlog != "nothing"
        thinkingmode = :continue_thinking
    else
        prompt =
            """
            <|im_start|>system
            {systemMsg}
            You have access to the following tools:
            {tools}
            Your job is to determine whether you will use tools or actions to finish the assignment. Choose one of the following choices:
            If you don't need tools or actions to finish the assignment say, "{no}".
            If you need tools or actions to finish the assignment say, "{yes}".
            <|im_end|>
            <|im_start|>user
            {input}
            <|im_end|>
            <|im_start|>assistant
            """
        toollines = ""
        for (toolname, v) in a.tools
            # fix: compare as String — tool keys are Symbols (see sibling method)
            if String(toolname) ∉ ("chatbox", "nothing")
                toollines *= "$toolname: $(v[:description]) $(v[:input]) $(v[:output])\n"
            end
        end
        prompt = replace(prompt, "{systemMsg}" => a.roles[a.role])
        prompt = replace(prompt, "{tools}" => toollines)
        prompt = replace(prompt, "{input}" => usermsg)
        result = sendReceivePrompt(a, prompt)
        willusetools = GeneralUtils.getStringBetweenCharacters(result, "{", "}")
        thinkingmode = willusetools == "yes" ? :new_thinking : :no_thinking
    end
    return thinkingmode
end

"""
    sendReceivePrompt(a::agent, prompt::String; timeout::Int=120)

Send `prompt` to the registered mqtt topic within `a.mqttClient` and poll for a
respond matching our message id.  Returns the respond text, or `nothing` on
timeout or when the respond carries no `:txt`.

```jldoctest
julia> using JSON3, UUIDs, Dates, FileIO, CommUtils, ChatAgent

julia> mqttClientSpec = (
           clientName= "someclient",           # name of this client
           clientID= "\$(uuid4())",
           broker= "mqtt.yiem.ai",
           pubtopic= (imgAI="img/api/v0.0.1/gpu/request", txtAI="txt/api/v0.1.0/gpu/request"),
           subtopic= (imgAI="agent/api/v0.1.0/img/respond", txtAI="agent/api/v0.1.0/txt/respond"),
           keepalive= 30,
       )

julia> msgMeta = Dict(
           :msgPurpose=> "updateStatus",
           :from=> "agent",
           :to=> "llmAI",
           :requestrespond=> "request",
           :sendto=> "",                               # destination topic
           :replyTo=> "agent/api/v0.1.0/txt/respond",  # requester asks responder to reply to this topic
           :repondToMsgId=> "",                        # responder is responding to this msg id
           :taskstatus=> "",                           # "complete", "fail", "waiting" or other status
           :timestamp=> Dates.now(),
           :msgId=> "\$(uuid4())",
       )

julia> newAgent = ChatAgent.agentReact("Jene", mqttClientSpec, role=:assistant_react, msgMeta=msgMeta)
```
"""
function sendReceivePrompt(a::T, prompt::String; timeout::Int=120) where {T<:agent}
    a.msgMeta[:msgId] = "$(uuid4())" # new msg id for each msg
    msg = Dict(
        :msgMeta=> a.msgMeta,
        :txt=> prompt,
    )
    payloadChannel = Channel(1)
    # send prompt
    CommUtils.request(a.mqttClient, msg)
    starttime = Dates.now()
    result = nothing
    while true
        timepass = (Dates.now() - starttime).value / 1000.0 # elapsed seconds
        CommUtils.mqttRun(a.mqttClient, payloadChannel)
        if isready(payloadChannel)
            topic, payload = take!(payloadChannel)
            # only accept the respond addressed to our message id
            if payload[:msgMeta][:repondToMsgId] == msg[:msgMeta][:msgId]
                result = haskey(payload, :txt) ? payload[:txt] : nothing
                break
            end
        elseif timepass > timeout
            println("sendReceivePrompt timeout $timepass/$timeout")
            result = nothing
            break
        end
        # otherwise still within the waiting period: keep polling
    end
    return result
end

"""
    toolNameBeingCalled(text::AbstractString, tools::Dict) -> Union{String, Nothing}

Extract the first tool name (a key of `tools`) that occurs in `text`, or
`nothing` when no tool name is mentioned.

```jldoctest
julia> text = " internetsearch\\n"

julia> tools = Dict(
           :internetsearch=>Dict(
               :name => "internetsearch",
               :description => "Useful for when you need to search the Internet",
               :input => "Input should be a search query.",
               :output => "",
           ),
           :chatbox=>Dict(
               :name => "chatbox",
               :description => "Useful for when you need to ask a customer what you need to know or to talk with them.",
               :input => "Input should be a conversation to customer.",
               :output => "",
           ),
       )

julia> toolname = toolNameBeingCalled(text, tools)
```
"""
function toolNameBeingCalled(text::T, tools::Dict) where {T<:AbstractString}
    called = nothing # renamed: the original local shadowed the function name
    for (k, v) in tools
        toolname = String(k)
        if contains(text, toolname)
            called = toolname
            break
        end
    end
    return called
end

#TODO Check the reasonableness of a user assignment given the available tools.
# Ref: https://www.youtube.com/watch?v=XV4IBaZqbps
# NOTE(review): depends on `llm` and `pyconvert`, which are only available when
# the PythonCall setup at the top of this module is enabled — confirm before use.
# The `context` parameter is shadowed by a local and `tools` is unused.
function checkReasonableness(userMsg::String, context::String, tools)
    prompt =
        """
        <|im_start|>system
        You are a helpful assistant. Your job is to check the reasonableness of user assignments.
        If the user assignment can be answered given the tools available say, "This is a reasonable assignment".
        If the user assignment cannot be answered then provide some feedback to the user that may improve their assignment.
        Here is the context for the assignment:
        {context}
        <|im_end|>
        <|im_start|>user
        {assignment}
        <|im_end|>
        <|im_start|>assistant
        """
    context = "You have access to the following tools: WineStock: useful for when you need to find info about wine by matching your description, price, name or ID. Input should be a search query with as much details as possible."
    prompt = replace(prompt, "{assignment}" => userMsg)
    prompt = replace(prompt, "{context}" => context)
    output_py = llm(
        prompt,
        max_tokens=512,
        temperature=0.1,
        # top_p=top_p,
        echo=false,
        stop=["", "<>", ],
    )
    _output_jl = pyconvert(Dict, output_py)
    output = pyconvert(Dict, _output_jl["choices"][1])
    output["text"]
end

"""
    detectCharacters(text, characters) -> Vector

Detect every occurrence of the given character strings in `text`.  Output is a
list of named tuples `(char, start, stop)` with byte-index positions.

```jldoctest
julia> text = "I like to eat apples and use utensils."

julia> characters = ["eat", "use", "i"]

julia> result = detectCharacters(text, characters)
4-element Vector{Any}:
 (char = "i", start = 4, stop = 4)
 (char = "eat", start = 11, stop = 13)
 (char = "use", start = 26, stop = 28)
 (char = "i", start = 35, stop = 35)
```
"""
function detectCharacters(text::T1, characters::Vector{T2}) where {T1<:AbstractString, T2<:AbstractString}
    result = []
    for i in eachindex(text)
        for char in characters
            l = length(char)
            char_startInd = i
            char_endInd = i + l - 1 # -1 because Julia uses inclusive index
            if char_endInd > length(text)
                # skip: candidate window runs past the end of the text
            else
                # byte-indexing a multibyte string can raise StringIndexError,
                # e.g. invalid index [535], valid nearby [534]=>'é' — skip those
                try
                    if text[char_startInd: char_endInd] == char
                        push!(result, (char=char, start=char_startInd, stop=char_endInd))
                    end
                catch
                end
            end
        end
    end
    return result
end

"""
    findDetectedCharacter(detectedCharacters, character)

Find a given character in a vector of named tuples (the output of
[`detectCharacters`](@ref)).  Output is the vector of location indices inside
`detectedCharacters`.

```jldoctest
julia> a = [
           (char = "i", start = 4, stop = 4),
           (char = "eat", start = 11, stop = 13),
           (char = "use", start = 26, stop = 28),
           (char = "i", start = 35, stop = 35),
       ]

julia> findDetectedCharacter(a, "i")
[1, 4]
```
"""
function findDetectedCharacter(detectedCharacters, character)
    allchar = [i[1] for i in detectedCharacters]
    return findall(==(character), allchar)
end

"""
    chunktext(text, headers)

Chunk a text into smaller pieces by header, where `headers` is the output of
[`detectCharacters`](@ref).  Each chunk is `(header, body)` with `body` running
from one header to the next (or to the end of the text).

```jldoctest
julia> using ChatAgent

julia> text = "Plan: First, we need to find out what kind of wine the user wants."

julia> headers = ChatAgent.detectCharacters(text, ["Nope", "sick", "First", "user", "Then"])
2-element Vector{Any}:
 (char = "First", start = 7, stop = 11)
 (char = "user", start = 56, stop = 59)

julia> chunkedtext = ChatAgent.chunktext(text, headers)
2-element Vector{Any}:
 (header = "First", body = ", we need to find out what kind of wine the ")
 (header = "user", body = " wants.")
```
"""
function chunktext(text::T, headers) where {T<:AbstractString}
    result = []
    for (i, v) in enumerate(headers)
        if i < length(headers)
            nextheader = headers[i+1]
            body = text[v[:stop]+1: nextheader[:start]-1]
        else
            body = text[v[:stop]+1: end]
        end
        push!(result, (header=v[:char], body=body))
    end
    return result
end

"""
    extractStepFromPlan(a::agent, plan, stepToExtract::Int) -> (Bool, String)

Ask the llm whether step `stepToExtract` exists in `plan`.  Returns
`(true, stepdetail)` when it does, `(false, "nothing")` otherwise.
"""
function extractStepFromPlan(a::agent, plan::T, stepToExtract::Int) where {T<:AbstractString}
    prompt =
        """
        <|im_start|>system
        You are a helpful assistant. Your job is to determine whether step {$stepToExtract} is in the user plan. Choose one of the following choices:
        If there isn't say, {no}.
        If there is say, {yes}. {copy the step and put it here}
        <|im_end|>
        <|im_start|>user
        $plan
        <|im_end|>
        <|im_start|>assistant
        """
    isStep = nothing
    step = nothing
    respond = sendReceivePrompt(a, prompt)
    isStep = GeneralUtils.getStringBetweenCharacters(respond, "{", "}")
    if isStep == "no"
        isStep = false
        step = "nothing"
    elseif isStep == "yes"
        isStep = true
        step = split(respond, "{yes}")[end] # everything after the marker is the step detail
    else
        error("undefined condition. isStep=$isStep $(@__LINE__)")
    end
    return isStep, step
end

end # module