diff --git a/Manifest.toml b/Manifest.toml
index 253dfd9..ab1cc66 100755
--- a/Manifest.toml
+++ b/Manifest.toml
@@ -2,7 +2,7 @@
julia_version = "1.10.0"
manifest_format = "2.0"
-project_hash = "f50669eb396930635cc23f4185cc7ad08cee3c08"
+project_hash = "f3efeb7273bc139c892115cf6041e156e511140f"
[[deps.AbstractFFTs]]
deps = ["LinearAlgebra"]
diff --git a/Project.toml b/Project.toml
index eb25519..b03c629 100755
--- a/Project.toml
+++ b/Project.toml
@@ -12,5 +12,6 @@ GeneralUtils = "c6c72f09-b708-4ac8-ac7c-2084d70108fe"
HTTP = "cd3eb016-35fb-5094-929b-558a96fad6f3"
JSON3 = "0f8b85d8-7281-11e9-16c2-39a750bddbf1"
PythonCall = "6099a3de-0909-46bc-b1f4-468b9a2dfc0d"
+Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
URIs = "5c2747f8-b7ea-4ff2-ba2e-563bfd36b1d4"
UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
diff --git a/src/interface.jl b/src/interface.jl
index 37ac30b..71efff3 100755
--- a/src/interface.jl
+++ b/src/interface.jl
@@ -7,7 +7,7 @@ export agentReact, agentReflex,
formulateUserresponse, extractinfo, updateEnvState, chat_mistral_openorca,
recap
-using JSON3, DataStructures, Dates, UUIDs, HTTP
+using JSON3, DataStructures, Dates, UUIDs, HTTP, Random
using CommUtils, GeneralUtils
using ..type, ..utils
@@ -386,18 +386,6 @@ function actor_mistral_openorca(a::agentReflex, selfaware=nothing)
push!(toolslist, toolname)
end
- # shorttermMemory = dictToString(a.memory[:shortterm], skiplist=["user:"])
-
- # conversation = conversationSummary(a)
- # println("")
- # @show conversationSum = conversation
-
- # context =
- # """
- # Your talk with the user:
- # $conversation
- # """
-
thought = "Thought: you should always think about what to do according to the plan (pay attention to correct numeral calculation and commonsense and do one thing at a time.)"
if selfaware !== nothing
@@ -407,7 +395,7 @@ function actor_mistral_openorca(a::agentReflex, selfaware=nothing)
# aware = "Self-awareness: Based on action's input and observed results, check your progress against the plan. Then, repeat all the details of what you have been gathered. Finally, describe in detail what you are missing."
thought =
"Self-awareness: $selfaware
- Thought: you should always think about what to do according to self-awareness (1. let's think a single step. 2. focus on incomplete task 3. pay attention to correct numeral calculation and commonsense.)
+ Thought: you should always think about what to do according to self-awareness (1. let's think a single step, 2. focus on what is missing, 3. pay attention to correct numeral calculation and commonsense.)
"
end
@@ -432,14 +420,14 @@ function actor_mistral_openorca(a::agentReflex, selfaware=nothing)
Use the following format:
$thought
- Act: an action you intend to do based on your thought, must be one of [{toolnames}].
+ Act: an action should aligned with your thought, must be one of [{toolnames}].
Actinput: your input to the action (pay attention to the tool's input)
Obs: observed result of the action
Thought: Greet user and begin the conversation.
Act: askbox
- Actinput 2: {\"askbox\": \"Hello! Welcome to our wine store. I'd be happy to help you find a perfect bottle for your occasion. Could you please tell me about the special event or occasion for which you are buying this wine?\"}
+ Actinput: {\"askbox\": \"Hello! Welcome to our wine store.\"}
@@ -457,20 +445,16 @@ function actor_mistral_openorca(a::agentReflex, selfaware=nothing)
latestTask = nothing
tempcounter = 0.2
+ seed = nothing
while true # while Thought or Act is empty, run actor again
- tempcounter += 0.2
+ # tempcounter += 0.2
@show tempcounter
- response = sendReceivePrompt(a, prompt, max_tokens=1024, temperature=tempcounter, timeout=180,
- stopword=["Obs:", "<|system|>", ""])
+ response = sendReceivePrompt(a, prompt, max_tokens=1024, temperature=0.4, timeout=180,
+ stopword=["Thought:", "Obs:", "<|system|>", "", "<|end|>"],
+ seed=seed)
response = splittext(response, ["/n/n", "END", "End", "Obs", "<|im_end|>"])
latestTask = shortMemLatestTask(a.memory[:shortterm]) +1
-
- # if start == "Thought:"
- # response = "Thought $latestTask: " * response
- # else
- # response = "Self-awareness $latestTask: " * response
- # end
response = "Thought:" * response
@@ -499,34 +483,40 @@ function actor_mistral_openorca(a::agentReflex, selfaware=nothing)
chunkedtext = chunktext(response, headers)
# assuming length more than 10 character means LLM has valid thinking
- @show iskey_Thought = haskey(chunkedtext, "Thought $latestTask:")
- @show iskey_Act = haskey(chunkedtext, "Act $latestTask:")
- @show iskey_Actinput = haskey(chunkedtext, "Actinput $latestTask:")
+ check_1 = haskey(chunkedtext, "Thought $latestTask:")
+ check_2 = haskey(chunkedtext, "Act $latestTask:")
+ check_3 = haskey(chunkedtext, "Actinput $latestTask:")
# check whether the act has valid json
- isJsonReadable = false
- try
- act = GeneralUtils.getStringBetweenCharacters(response, '{', '}', endCharLocation="end")
- act = JSON3.read(act)
- isJsonReadable = true
- catch
- end
-
- if iskey_Thought && iskey_Act && iskey_Actinput
- istoolnameValid = false
- for i in toolslist
- if occursin(i, chunkedtext["Act $latestTask:"])
- istoolnameValid = true
- break
- end
- end
+ # check_4 = false
+ # try
+ # act = GeneralUtils.getStringBetweenCharacters(response, '{', '}', endCharLocation="end")
+ # act = JSON3.read(act)
+ # check_4 = true
+ # catch
+ # end
- if length(chunkedtext["Thought $latestTask:"]) > 5 && istoolnameValid &&
- length(chunkedtext["Actinput $latestTask:"]) > 5 && isJsonReadable
+ # check for a valid toolname
+ check_4 = false
+ for i in toolslist
+ if occursin(i, chunkedtext["Act $latestTask:"])
+ check_4 = true
break
end
end
- println("retry actor")
+
+    # check that Thought is non-empty (more than 5 characters)
+ check_5 = length(chunkedtext["Thought $latestTask:"]) > 5
+    # check that Actinput is non-empty (more than 5 characters)
+ check_6 = length(chunkedtext["Actinput $latestTask:"]) > 5
+
+ # print all check_1 to check_6
+ println("check_1: $check_1, check_2: $check_2, check_3: $check_3, check_4: $check_4, check_5: $check_5, check_6: $check_6")
+
+ if check_1 && check_2 && check_3 && check_4 && check_5 && check_6
+ #WORKING paraphrase selfaware
+ break
+ end
end
toolname = toolNameBeingCalled(chunkedtext["Act $latestTask:"], a.tools)
@@ -537,7 +527,7 @@ function actor_mistral_openorca(a::agentReflex, selfaware=nothing)
"Answer:", "Conclusion:", "Summary:"]
response = replaceHeaders(response, headerToDetect, latestTask)
println("")
-
+ @show actor_response_1 = response
headerToDetect = ["Plan $(a.attempt):",
"Thought $latestTask:",
"Act $latestTask:",
@@ -547,20 +537,28 @@ function actor_mistral_openorca(a::agentReflex, selfaware=nothing)
headers = detectCharacters(response, headerToDetect)
chunkedtext = chunktext(response, headers)
chunkedtext = delete!(chunkedtext, "Self-awareness $latestTask")
- act = GeneralUtils.getStringBetweenCharacters(response, '{', '}', endCharLocation="end")
- println("")
- @show actor_response_1 = act
- act = copy(JSON3.read(act))
- chunkedtext["Act $latestTask:"] = toolname
- chunkedtext["Actinput $latestTask:"] = act[Symbol(toolname)]
- toolinput = act[Symbol(toolname)]
+ println("")
+ @show chunkedtext
+
+ toolinput = chunkedtext["Actinput $latestTask:"]
+
+    # Tools take JSON input, but sometimes the LLM output is not JSON, so check before parsing.
+ if occursin("{", toolinput)
+ act = GeneralUtils.getStringBetweenCharacters(response, '{', '}', endCharLocation="end")
+ act = copy(JSON3.read(act))
+ chunkedtext["Actinput $latestTask:"] = JSON3.write(act[Symbol(toolname)])
+ toolinput = act[Symbol(toolname)]
+ end
+ chunkedtext["Act $latestTask:"] = toolname
+
return toolname, toolinput, chunkedtext
end
+
"""
Chat with llm.
@@ -1089,7 +1087,7 @@ function formulateUserresponse(a)
Your work:
$work
- From your talk with the user and your work, formulate a response for the user.
+ From your talk with the user and your work, formulate a response for the user by comparing and explaining your rational behind each choice in details.
<|assistant|>
response:
@@ -1098,6 +1096,35 @@ function formulateUserresponse(a)
return response
end
+# function formulateUserresponse(a)
+# conversation = messagesToString_nomark(a.messages, addressAIas="I")
+# work = dictToString(a.memory[:shortterm])
+
+# prompt =
+# """
+# <|system|>
+# Symbol:
+# Plan: a plan
+# Thought: your thought
+# Act: the action you took
+# Actinput: the input to the action
+# Obs: the result of the action
+
+# Your talk with the user:
+# $conversation
+
+# Your work:
+# $work
+
+# From your talk with the user and your work, formulate a response for the user .
+#
+# <|assistant|>
+# response:
+# """
+# response = sendReceivePrompt(a, prompt)
+# return response
+# end
+
""" Extract important info from text into key-value pair text.
@@ -1416,12 +1443,6 @@ end
-
-
-
-
-
-
diff --git a/src/llmfunction.jl b/src/llmfunction.jl
index 4304fe5..b4af4f5 100644
--- a/src/llmfunction.jl
+++ b/src/llmfunction.jl
@@ -2,7 +2,7 @@ module llmfunction
export wikisearch, winestock
-using HTTP, JSON3, URIs
+using HTTP, JSON3, URIs, Random
using GeneralUtils
using ..type, ..utils
#------------------------------------------------------------------------------------------------100
@@ -124,7 +124,7 @@ function winestock(a::agentReflex, query::Dict)
Your are a helpful assistant.
-
+
Intensity level:
intensity = 1, light bodied
intensity = 2, light-medium bodied
@@ -149,9 +149,10 @@ function winestock(a::agentReflex, query::Dict)
acidity = 3, medium acidity
acidity = 4, medium-high acidity
acidity = 5, high acidity
-
+
- Write a SQL command using data from a JSON-format query.
+ Consult the conversion table then write a specific SQL command from a JSON-format query.
+ List of keywords not allowed in SQL: ["BETWEEN", "--"]
query: {\"wine type\": \"white\", \"wine characteristics\": \"full-bodied | off-dry | low acidity | medium tannin\", \"price\": {\"max\": \"50\"}}
@@ -169,34 +170,74 @@ function winestock(a::agentReflex, query::Dict)
"""
println("")
@show db_prompt = prompt
- response = sendReceivePrompt(a, prompt, max_tokens=256, temperature=0.4,
+ _sql = nothing
+ while true
+ _sql = sendReceivePrompt(a, prompt, max_tokens=256, temperature=0.2,
stopword=["/n/n", "END", "End", "Obs", "<|", ""])
- println("")
- @show db_response = response
+        # reject responses containing disallowed SQL keywords
+ check_1 = occursin("BETWEEN", _sql)
+ check_2 = occursin("--", _sql)
- # remove any blank character in front of a string
- newresponse = nothing
- for i in eachindex(response)
- if response[i] != ' '
- newresponse = response[i:end]
+ if check_1 == false && check_2 == false
break
end
end
- response = newresponse
+ println("")
+ @show db_sql = _sql
- error("winestock done")
-
- body =
- """
- INSERT INTO $tablename
- $(JSON3.write(insertdata));
- """
+    # strip leading spaces from the generated SQL string
+ newsql = nothing
+ for i in eachindex(_sql)
+ if _sql[i] != ' '
+ newsql = _sql[i:end]
+ break
+ end
+ end
+ sql = split(newsql, ";")[1] * ";"
+
+ body = sql
uri = URI(scheme="http", host="192.168.88.12", port="9010", path="/sql", userinfo="root:root")
r = HTTP.request("POST", uri, ["Accept" => "application/json", "NS"=>"yiem", "DB"=>"Blossom_wines"], body)
println("")
- @show r.body
+ @show r
+ a.memory[:r] = r
+ result = copy(JSON3.read(r.body))
+
+ wines = shuffle(result[1][:result]) # shuffle in case there are more than 1 result
+ println("")
+ @show wines
+
+    # keep at most two wines to limit the response size
+ if length(wines) > 2
+ println("$(length(wines)) wines found")
+ wines = wines[1:2]
+ end
+
+ result = nothing
+ if length(wines) == 0
+ result =
+ """
+ Wine not found.
+ """
+ else
+        # serialize each wine entry into a string
+ wines_str = ""
+ for (i, wine) in enumerate(wines)
+ winename = wine[:wine_name]
+ wines_str *= "$i: $(JSON3.write(wines[i])),"
+ end
+
+ result =
+ """
+ I found the following wines in our stock:
+ {
+ $wines_str
+ }
+ """
+ end
+ @show result
return result
end
@@ -225,15 +266,6 @@ end
-
-
-
-
-
-
-
-
-
diff --git a/src/type.jl b/src/type.jl
index 2bd4fce..9d3029f 100644
--- a/src/type.jl
+++ b/src/type.jl
@@ -99,7 +99,7 @@ julia> agent = ChatAgent.agentReflex(
env::AbstractString = "N/A"
thinkingFormat::Union{Dict, Nothing} = nothing
roleSpecificInstruction::Union{Dict, Nothing} = nothing
- memory::Dict = Dict(
+ memory::Dict{Symbol, Any} = Dict(
:shortterm=> OrderedDict{String, Any}(),
:longterm=> OrderedDict{String, Any}(),
:log=> OrderedDict{String, Any}(), # span from user stimulus -> multiples attempts -> final respond
@@ -141,7 +141,7 @@ function agentReflex(
- type of food that will be served with wine: ask the user
- ambient temperature at the serving location: ask the user
- type of wine (we have Rose, White, Red, Rose and Sparkling): ask the user
- - user's personal taste of wine characteristic: ask the user
+ - wine characteristics: ask the user
- wine price range: ask the user
- wines we have in stock: use winestock tool
"""
diff --git a/src/utils.jl b/src/utils.jl
index cff2189..e6d9b89 100644
--- a/src/utils.jl
+++ b/src/utils.jl
@@ -26,7 +26,7 @@ using ..type
```
"""
function sendReceivePrompt(a::T, prompt::String; max_tokens=256, timeout::Int=120,
- temperature::AbstractFloat=0.2, stopword=[]) where {T<:agent}
+ temperature::AbstractFloat=0.2, stopword=[], seed=nothing) where {T<:agent}
a.msgMeta[:msgId] = "$(uuid4())" # new msg id for each msg
msg = Dict(
:msgMeta=> a.msgMeta,
@@ -34,6 +34,7 @@ function sendReceivePrompt(a::T, prompt::String; max_tokens=256, timeout::Int=12
:max_tokens=> max_tokens,
:temperature=> temperature,
:stopword=> stopword,
+ :seed=> seed,
)
payloadChannel = Channel(1)