diff --git a/src/interface.jl b/src/interface.jl index 304a00f..c662a71 100755 --- a/src/interface.jl +++ b/src/interface.jl @@ -224,7 +224,6 @@ function agentReflex( """, ), thinkingFormat::Dict=Dict( - :no_thinking=> "", :react=> """Use the following format: Question: the input question your user is asking and you must answer @@ -460,8 +459,7 @@ function generatePrompt_mistral_openorca(a::T, usermsg::String, return prompt end - -function genPrompt_mistral_openorca_planning(a::agentReflex, usermsg::String) #WORKING +function genPrompt_mistral_openorca(a::agentReflex, usermsg::String) #WORKING """ general prompt format: @@ -480,8 +478,74 @@ function genPrompt_mistral_openorca_planning(a::agentReflex, usermsg::String) #W " Note: - {context} = {earlierConversation} + {current status} + - {shortterm memory} + {longterm memory} + {context} = + " + {earlierConversation} + {current status} + {shortterm memory} + {longterm memory} + " + """ + + prompt = + """ + <|im_start|>system + {role} + {tools} + {thinkingFormat} + <|im_end|> + {context} + <|im_start|>user + {usermsg} + <|im_end|> + <|im_start|>assistant + + """ + prompt = replace(prompt, "{role}" => a.roles[a.role]) + prompt = replace(prompt, "{thinkingFormat}" => "") + + context = + """ + {earlierConversation} + {current status} + {longterm memory} + """ + prompt = replace(prompt, "{earlierConversation}" => "My earlier talk with the user:\n$(a.earlierConversation)") + prompt = replace(prompt, "{current status}" => "") + prompt = replace(prompt, "{longterm memory}" => "") + + prompt *= "<|im_start|>user\nQuestion: " * usermsg * "\n<|im_end|>\n" + prompt *= "<|im_start|>assistant\n" + + return prompt +end + +function genPrompt_planning_mistral_openorca(a::agentReflex, usermsg::String) #WORKING + """ + general prompt format: + + " + <|im_start|>system + {role} + {tools} + {thinkingFormat} + <|im_end|> + {context} + <|im_start|>user + {usermsg} + <|im_end|> + <|im_start|>assistant + + " + + Note: + {context} = + " + {earlierConversation} + {current status} + {shortterm memory} + {longterm memory} + " """ prompt = @@ -731,7 +795,7 @@ function conversation(a::agentReflex, usermsg::String; thinkingroundlimit::Int=3 if a.thinkingMode == :no_thinking a.earlierConversation = conversationSummary(a) #TODO should be long conversation before use summary because it leaves out details _ = addNewMessage(a, "user", usermsg) - prompt = generatePrompt_mistral_openorca(a, usermsg) + prompt = genPrompt_mistral_openorca(a, usermsg) #TODO rewrite this function @show prompt respond = sendReceivePrompt(a, prompt) respond = split(respond, "<|im_end|>")[1] @@ -763,7 +827,7 @@ function work(a::agentReflex, usermsg::String) @show a.thinkinground toolname = nothing toolinput = nothing - plan = generatePrompt_planning(a, prompt) + plan = genPrompt_planning_mistral_openorca(a, prompt) @show plan # for # # execute