From 11cae89eedfbe68e350359313fbfde498aa54ffc Mon Sep 17 00:00:00 2001
From: tonaerospace
Date: Wed, 20 Dec 2023 00:21:49 +0000
Subject: [PATCH] update

---
 src/interface.jl | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/src/interface.jl b/src/interface.jl
index 59e0596..f19c687 100755
--- a/src/interface.jl
+++ b/src/interface.jl
@@ -203,6 +203,7 @@ function planner_mistral_openorca(a::agentReflex)
     Plan: first you should always think about your conversation with the user and your earlier work thoroughly then extract and devise a complete, step by step plan to achieve your objective (pay attention to correct numeral calculation and commonsense).
     P.S.1 each step of the plan should be a single action.
     P.S.2 ask the user if you don't have info.
+    P.S.3 mark a completed step with done keyword.
     <|/s|>
     $conversation
     <|assistant|>
@@ -557,6 +558,24 @@ function conversation(a::agentReflex, usermsg::String; attemptlimit::Int=3)
     return response
 end
 
+""" Direct conversation is not an agent; the user message does not pass through
+    the logic loop but goes directly to the LLM (optionally after tool use).
+
+"""
+function directconversation(a::agentReflex, usermsg::String)
+    response = nothing
+
+    _ = addNewMessage(a, "user", usermsg)
+    if isusetools # use tools before responding; NOTE(review): `isusetools` looks undefined in this scope — confirm it is a global flag or should be a field of `a`
+        workstate, response = work(a)
+    end
+
+    response = chat_mistral_openorca(a) # NOTE(review): overwrites any `response` set by work(a), and `workstate` is unused — confirm intended
+    response = removeTrailingCharacters(response)
+    _ = addNewMessage(a, "assistant", response)
+    return response
+end
+
 """ Continuously run llm functions except when llm is getting Answer: or chatbox.
     There are many work() depend on thinking mode.