module llmfunction
|
||
|
||
export virtualWineUserChatbox, jsoncorrection, checkinventory,
|
||
virtualWineUserRecommendbox, userChatbox, userRecommendbox
|
||
|
||
using HTTP, JSON3, URIs, Random, PrettyPrinting, UUIDs
|
||
using GeneralUtils, SQLLLM
|
||
using ..type, ..util
|
||
|
||
# ---------------------------------------------- 100 --------------------------------------------- #
|
||
|
||
|
||
"""
    virtualWineUserRecommendbox(a, input)

Send `input` to the virtual wine customer service over MQTT and return the
customer's recommendation-style reply.

# Arguments
- `a::T1`
    one of Yiem's agents; supplies the external-service and MQTT broker config
- `input`
    text to be sent to the virtual wine customer

# Return
A 4-tuple `(text, select, reward, isterminal)` where
- `text::String` is the customer's conversational reply,
- `select` is the selected option index or `nothing`,
- `reward::Number` is the feedback signal,
- `isterminal::Bool` marks the end of the conversation.

# Example
```jldoctest
julia>
```

# TODO
- [ ] update docstring
- [ ] add recommend() to compare wine

# Signature
"""
function virtualWineUserRecommendbox(a::T1, input
    )::Union{Tuple{String, Number, Number, Bool}, Tuple{String, Nothing, Number, Bool}} where {T1<:agent}

    # Put the raw input into the configured LLM's instruct format.
    virtualWineCustomer = a.config[:externalservice][:virtualWineCustomer_1]
    llminfo = virtualWineCustomer[:llminfo]
    prompt =
        if llminfo[:name] == "llama3instruct"
            formatLLMtext_llama3instruct("assistant", input)
        else
            error("llm model name is not defined yet $(@__LINE__)")
        end

    # Send formatted input to the virtual customer using GeneralUtils.sendReceiveMqttMsg.
    msgMeta = GeneralUtils.generate_msgMeta(
        virtualWineCustomer[:mqtttopic],
        senderName= "virtualWineUserRecommendbox",
        senderId= a.id,
        receiverName= "virtualWineCustomer",
        mqttBroker= a.config[:mqttServerInfo][:broker],
        mqttBrokerPort= a.config[:mqttServerInfo][:port],
        msgId = "dummyid" #CHANGE remove after testing finished
    )

    outgoingMsg = Dict(
        :msgMeta=> msgMeta,
        :payload=> Dict(
            :text=> prompt,
        )
    )

    result = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120)
    response = result[:response]

    return (response[:text], response[:select], response[:reward], response[:isterminal])
end
|
||
|
||
|
||
|
||
"""
    virtualWineUserChatbox(config, input, virtualCustomerChatHistory)

Role-play a virtual wine customer: prepend a system prompt to the chat history,
send the sommelier's `input` plus the history to the instruct-LLM service over
MQTT, and parse the customer's JSON reply (with retries and JSON correction).

NOTE(review): mutates `virtualCustomerChatHistory` by `pushfirst!`-ing the
system message onto it; callers should not assume the vector is unchanged.

# Arguments
- `config::T1`
    agent configuration dict (external services, MQTT broker info)
- `input::T2`
    sommelier text to be sent to the virtual wine customer
- `virtualCustomerChatHistory`
    vector of `Dict(:name=>..., :text=>...)` conversation turns

# Return
A 4-tuple `(text, select, reward, isterminal)` where
- `text::String` is the customer's reply,
- `select` is the selected option index or `nothing`,
- `reward::Number` is -1, 0 or 1,
- `isterminal::Bool` signals the end of the conversation.

# Example
```jldoctest
julia>
```

# TODO
- [ ] update docs
- [x] write a prompt for virtual customer

# Signature
"""
function virtualWineUserChatbox(config::T1, input::T2, virtualCustomerChatHistory
    )::Union{Tuple{String, Number, Number, Bool}, Tuple{String, Nothing, Number, Bool}} where {T1<:AbstractDict, T2<:AbstractString}

    systemmsg =
        """
        You find yourself in a well-stocked wine store, engaged in a conversation with the store's knowledgeable sommelier.
        You're on a quest to find a bottle of wine that aligns with your specific preferences and requirements.

        The ideal wine you're seeking should meet the following criteria:
        1. It should fit within your budget.
        2. It should be suitable for the occasion you're planning.
        3. It should pair well with the food you intend to serve.
        4. It should be of a particular type of wine you prefer.
        5. It should possess certain characteristics, including:
        - The level of sweetness.
        - The intensity of its flavor.
        - The amount of tannin it contains.
        - Its acidity level.

        Here's the criteria details:
        {
        "budget": 50,
        "occasion": "graduation ceremony",
        "food pairing": "Thai food",
        "type of wine": "red",
        "wine sweetness level": "dry",
        "wine intensity level": "full-bodied",
        "wine tannin level": "low",
        "wine acidity level": "medium",
        }

        You should only respond with "text", "select", "reward", "isterminal" steps.
        "text" is your conversation.
        "select" is an integer. Choose an option when presented with choices, or leave it null if none of the options satisfy you or if no choices are available.
        "reward" is an integer, it can be three number:
        1) 1 if you find the right wine.
        2) 0 if you don’t find the ideal wine.
        3) -1 if you’re dissatisfied with the sommelier’s response.
        "isterminal" can be false if you still want to talk with the sommelier, true otherwise.

        You should only respond in JSON format as describe below:
        {
        "text": "your conversation",
        "select": null,
        "reward": 0,
        "isterminal": false
        }

        Here are some examples:

        sommelier: "What's your budget?
        you:
        {
        "text": "My budget is 30 USD.",
        "select": null,
        "reward": 0,
        "isterminal": false
        }

        sommelier: "The first option is Zena Crown and the second one is Buano Red."
        you:
        {
        "text": "I like the 2nd option.",
        "select": 2,
        "reward": 1,
        "isterminal": true
        }

        Let's begin!
        """

    pushfirst!(virtualCustomerChatHistory, Dict(:name=> "system", :text=> systemmsg))

    # Replace the :user key in chat history so the virtual wine customer AI can
    # role-play: "user" becomes "you" (the customer), "assistant" the sommelier.
    chathistory::Vector{Dict{Symbol, Any}} = Vector{Dict{Symbol, Any}}()
    for i in virtualCustomerChatHistory
        newdict = Dict()
        newdict[:name] =
            if i[:name] == "user"
                "you"
            elseif i[:name] == "assistant"
                "sommelier"
            else
                i[:name]
            end

        newdict[:text] = i[:text]
        push!(chathistory, newdict)
    end

    push!(chathistory, Dict(:name=> "assistant", :text=> input))

    # Put in model format and prime the reply so the LLM continues a JSON object.
    prompt = formatLLMtext(chathistory, "llama3instruct")
    prompt *=
        """
        <|start_header_id|>you<|end_header_id|>
        {"text"
        """

    pprint(prompt)
    externalService = config[:externalservice][:text2textinstruct]

    # Send formatted input to user using GeneralUtils.sendReceiveMqttMsg.
    msgMeta = GeneralUtils.generate_msgMeta(
        externalService[:mqtttopic],
        senderName= "virtualWineUserChatbox",
        senderId= string(uuid4()),
        receiverName= "text2textinstruct",
        mqttBroker= config[:mqttServerInfo][:broker],
        mqttBrokerPort= config[:mqttServerInfo][:port],
        msgId = string(uuid4()) #CHANGE remove after testing finished
    )

    outgoingMsg = Dict(
        :msgMeta=> msgMeta,
        :payload=> Dict(
            :text=> prompt,
        )
    )

    # Retry up to 5 times: LLM output is not guaranteed to be valid JSON.
    for attempt in 1:5
        try
            response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120)
            _responseJsonStr = response[:response][:text]
            expectedJsonExample =
                """
                Here is an expected JSON format:
                {
                "text": "...",
                "select": "...",
                "reward": "...",
                "isterminal": "..."
                }
                """
            responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
            responseDict = copy(JSON3.read(responseJsonStr))

            text::AbstractString = responseDict[:text]
            # NOTE(review): JSON3 maps a JSON null to `nothing`; the "null" string
            # compare additionally guards string-typed nulls from the correction step.
            select::Union{Nothing, Number} = responseDict[:select] == "null" ? nothing : responseDict[:select]
            reward::Number = responseDict[:reward]
            isterminal::Bool = responseDict[:isterminal]

            # An empty reply means the model did not role-play; trigger a retry.
            if text == ""
                error("virtual customer did not answer correctly")
            end

            return (text, select, reward, isterminal)
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            @warn "Error occurred: $errorMsg\n$st"
            println("")
        end
    end
    error("virtualWineUserChatbox failed to get a response")
end
|
||
|
||
"""
    checkinventory(a, input)

Search the wine inventory for bottles matching the user's request.

The query text is built from two LLM extraction passes over `input`
(general attributes and numeric taste levels) and then handed to the
SQL-over-LLM backend.

# Arguments
- `a::T1`
    one of ChatAgent's agents (provides `executeSQL` and `text2textInstructLLM`)
- `input::T2`
    the user's request in plain English

# Return
`(result, success, errormsg)` named tuple where `result` is a JSON string of
available wines, `success` is `true`, and `errormsg` is `nothing`.

# TODO
- [ ] update docs
- [x] implement the function

# Signature
"""
function checkinventory(a::T1, input::T2
    )::NamedTuple{(:result, :success, :errormsg), Tuple{String, Bool, Union{String, Nothing}}} where {T1<:agent, T2<:AbstractString}
    println("--> checkinventory input: $input ", @__FILE__, " ", @__LINE__)

    # Two extraction passes: general attributes, then numeric taste ranges.
    generalattrs = extractWineAttributes_1(a, input)
    tasteattrs = extractWineAttributes_2(a, input)
    inventoryquery = "$generalattrs, $tasteattrs"
    println("--> checkinventory: $inventoryquery ", @__FILE__, " ", @__LINE__)

    queried = SQLLLM.query(inventoryquery, a.executeSQL, a.text2textInstructLLM)
    return (result=queried, success=true, errormsg=nothing)
end
|
||
|
||
|
||
"""
    extractWineAttributes_1(a, input)

Extract general wine preferences (type, price, occasion, food pairing,
country, grape variety, flavors, aromas) from the user's query via the
agent's instruct LLM, and return them as a `"key: value, key: value"` string.
Attributes marked `NA`/empty/none by the LLM are dropped from the result.

# Arguments
- `a::T1`
    one of the agents; provides `text2textInstructLLM`
- `input::T2`
    the user's query in plain English

# Return
- `result::String`
    comma-separated `key: value` pairs of the extracted attributes
    (may be empty when nothing was extracted)

# Example
```jldoctest
julia>
```

# TODO
- [ ] update docstring
- [x] implement the function

# Signature
"""
function extractWineAttributes_1(a::T1, input::T2
    )::String where {T1<:agent, T2<:AbstractString}

    systemmsg =
        """
        As an helpful sommelier, your task is to fill out the user's preference form based on the corresponding words from the user's query.

        At each round of conversation, the user will give you the current situation:
        User's query: ...

        The preference form requires the following information:
        wine_type, price, occasion, food_to_be_paired_with_wine, country, grape_variety, flavors, aromas.

        You must follow the following guidelines:
        1) If specific information required in the preference form is not available in the query or there isn't any, mark with 'NA' to indicate this.
        Additionally, words like 'any' or 'unlimited' mean no information is available.
        2) Use the conversion table to convert the descriptive word level of sweetness, intensity, tannin, and acidity into a corresponding integer.
        3) Do not generate other comments.

        You should then respond to the user with the following points:
        - reasoning: State your understanding of the current situation
        - wine_type: Can be one of: "red", "white", "sparkling", "rose", "dessert" or "fortified"
        - price: Must be an integer representing the cost of the wine.
        - occasion: ...
        - food_to_be_paired_with_wine: food that the user will be served with wine
        - country: wine's country of origin
        - grape variety: a single name of grape used to make wine.
        - flavors: Names of items that the wine tastes like.
        - aromas: wine's aroma

        You should only respond in the form as described below:
        reasoning: ...
        wine_type: ...
        price: ...
        occasion: ...
        food_to_be_paired_with_wine: ...
        country: ...
        grape_variety: ...
        flavors: ...
        aromas: ...

        Let's begin!
        """

    # chathistory = vectorOfDictToText(a.chathistory)

    usermsg =
        """
        User's query: $input
        """

    _prompt =
        [
            Dict(:name=> "system", :text=> systemmsg),
            Dict(:name=> "user", :text=> usermsg)
        ]

    # Put in model format and prime an assistant turn.
    prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
    prompt *=
        """
        <|start_header_id|>assistant<|end_header_id|>
        """

    attributes = ["reasoning", "wine_type", "price", "occasion", "food_to_be_paired_with_wine", "country", "grape_variety", "flavors", "aromas"]
    # Retry up to 5 times: LLM output is not guaranteed to be well-formed.
    for attempt in 1:5
        try
            response = a.text2textInstructLLM(prompt)
            responsedict = GeneralUtils.textToDict(response, attributes, rightmarker=":", symbolkey=true)

            # Every expected attribute must be present and non-empty.
            for i ∈ attributes
                if length(JSON3.write(responsedict[Symbol(i)])) == 0
                    error("$i is empty ", @__LINE__)
                end
            end

            #[PENDING] check if grape_variety has more than 1 name
            if length(split(responsedict[:grape_variety], ",")) > 1
                error("multiple name in grape_variety is not allowed")
            end

            # Drop attributes we do not feed into the inventory query.
            # (:tasting_notes is never produced by textToDict; delete! is a no-op then.)
            delete!(responsedict, :reasoning)
            delete!(responsedict, :tasting_notes)
            delete!(responsedict, :flavors)
            delete!(responsedict, :aromas)

            # Remove parenthesized LLM commentary like "(some text)".
            for (k, v) in responsedict
                responsedict[k] = replace(v, r"\(.*?\)" => "")
            end

            result = ""
            for (k, v) in responsedict
                # Skip attributes the LLM marked as unavailable.
                if !occursin("NA", v) && v != "" && !occursin("none", v) && !occursin("None", v)
                    result *= "$k: $v, "
                end
            end

            #[PENDING] remove halucination. "highend dry white wine" --> "wine_type: white, occasion: special occasion, food_to_be_paired_with_wine: seafood, fish, country: France, Italy, USA, grape_variety: Chardonnay, Sauvignon Blanc, Pinot Grigio\nwine_notes: citrus, green apple, floral"

            # Remove the trailing ", " (guard against an all-NA empty result).
            if !isempty(result)
                result = result[1:end-2]
            end

            return result
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            println("Attempt $attempt. Error occurred: $errorMsg\n$st")
            println("")
        end
    end
    error("extractWineAttributes_1() failed to get a response")
end
|
||
|
||
"""
    extractWineAttributes_2(a, input)

Extract numeric taste-level ranges (sweetness, acidity, tannin, intensity)
from the user's query via the agent's instruct LLM, using a word-to-integer
conversion table, and return them as a `"key: value, key: value"` string.
Attributes marked `NA` are dropped from the result.

# Arguments
- `a::T1`
    one of the agents; provides `text2textInstructLLM`
- `input::T2`
    the user's query in plain English

# Return
- `result::String`
    comma-separated `key: range` pairs (e.g. `"sweetness: 1-2, tannin: 3-4"`;
    may be empty when nothing was extracted)

# TODO
- [PENDING] "French dry white wines with medium bod" the LLM does not recognize sweetness. use LLM self questioning to solve.
"""
function extractWineAttributes_2(a::T1, input::T2
    )::String where {T1<:agent, T2<:AbstractString}

    conversiontable =
        """
        Conversion Table:
        Intensity level:
        1 to 2: May correspond to "light-bodied" or a similar description.
        2 to 3: May correspond to "med light bodied", "medium light" or a similar description.
        3 to 4: May correspond to "medium bodied" or a similar description.
        4 to 5: May correspond to "med full bodied", "medium full" or a similar description.
        4 to 5: May correspond to "full bodied" or a similar description.
        Sweetness level:
        1 to 2: May correspond to "dry", "no sweet" or a similar description.
        2 to 3: May correspond to "off dry", "less sweet" or a similar description.
        3 to 4: May correspond to "semi sweet" or a similar description.
        4 to 5: May correspond to "sweet" or a similar description.
        4 to 5: May correspond to "very sweet" or a similar description.
        Tannin level:
        1 to 2: May correspond to "low tannin" or a similar description.
        2 to 3: May correspond to "semi low tannin" or a similar description.
        3 to 4: May correspond to "medium tannin" or a similar description.
        4 to 5: May correspond to "semi high tannin" or a similar description.
        4 to 5: May correspond to "high tannin" or a similar description.
        Acidity level:
        1 to 2: May correspond to "low acidity" or a similar description.
        2 to 3: May correspond to "semi low acidity" or a similar description.
        3 to 4: May correspond to "medium acidity" or a similar description.
        4 to 5: May correspond to "semi high acidity" or a similar description.
        4 to 5: May correspond to "high acidity" or a similar description.
        """

    # chathistory = vectorOfDictToText(a.chathistory)

    systemmsg =
        """
        As an helpful sommelier, your task is to fill out the user's preference form based on the corresponding words from the user's query.

        At each round of conversation, the user will give you the current situation:
        Conversion Table: ...
        User's query: ...

        The preference form requires the following information:
        sweetness, acidity, tannin, intensity

        You must follow the following guidelines:
        1) If specific information required in the preference form is not available in the query or there isn't any, mark with 'NA' to indicate this.
        Additionally, words like 'any' or 'unlimited' mean no information is available.
        2) Use the conversion table to convert the descriptive word level of sweetness, intensity, tannin, and acidity into a corresponding integer.
        3) Do not generate other comments.

        You should then respond to the user with the following points:
        - reasoning: State your understanding of the current situation
        - sweetness: S where S are integers represent the range of sweetness levels
        Example: 1-2
        - acidity: D where D are integers represent the range of acidity level
        Example: 3-5
        - tannin: T where T are integers represent the range of tannin level
        Example: 1-3
        - intensity: I where I are integers represent the range of intensity level
        Example: 2-4
        - notes: Anything you want to add

        You should only respond in the form as described below:
        reasoning: ...
        sweetness: ...
        acidity: ...
        tannin: ...
        intensity: ...
        notes: ...

        Let's begin!
        """

    usermsg =
        """
        $conversiontable
        User's query: $input
        """

    _prompt =
        [
            Dict(:name=> "system", :text=> systemmsg),
            Dict(:name=> "user", :text=> usermsg)
        ]

    # Put in model format and prime an assistant turn.
    prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
    prompt *=
        """
        <|start_header_id|>assistant<|end_header_id|>
        """

    attributes = ["reasoning", "sweetness", "acidity", "tannin", "intensity", "notes"]

    # Retry up to 5 times: LLM output is not guaranteed to be well-formed.
    for attempt in 1:5
        try
            response = a.text2textInstructLLM(prompt)
            responsedict = GeneralUtils.textToDict(response, attributes, rightmarker=":", symbolkey=true)

            # Every expected attribute must be present and non-empty.
            for i ∈ attributes
                if length(JSON3.write(responsedict[Symbol(i)])) == 0
                    error("$i is empty ", @__LINE__)
                end
            end

            delete!(responsedict, :reasoning)
            delete!(responsedict, :notes) # LLM traps. so it can add useless info here like comments.

            # Sometimes the LLM claims acidity/tannin levels the user never mentioned.
            for (k, v) in responsedict
                if k ∈ [:acidity, :tannin] && !occursin(string(k), input)
                    responsedict[k] = "NA"
                end
            end

            # Remove parenthesized LLM commentary like "(some text)".
            for (k, v) in responsedict
                responsedict[k] = replace(v, r"\(.*?\)" => "")
            end

            # Sometimes the LLM does not produce an integer range; anything longer
            # than "X-Y" (or "NA") is rejected and triggers a retry.
            for (k, v) in responsedict
                if length(v) > 5
                    error("non-range is not allowed. $k $v")
                end
            end

            # Sometimes the LLM says "NA-2"; convert the NA bound to 1.
            for (k, v) in responsedict
                if occursin("NA", v) && occursin("-", v)
                    responsedict[k] = replace(v, "NA"=>"1")
                end
            end

            result = ""
            for (k, v) in responsedict
                # Skip attributes the LLM marked as unavailable.
                if !occursin("NA", v)
                    result *= "$k: $v, "
                end
            end

            # Remove the trailing ", " (guard against an all-NA empty result).
            if !isempty(result)
                result = result[1:end-2]
            end

            return result
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            println("Attempt $attempt. Error occurred: $errorMsg\n$st")
            println("")
        end
    end
    error("extractWineAttributes_2() failed to get a response")
end
|
||
|
||
|
||
|
||
"""
    jsoncorrection(config, input, correctJsonExample; maxattempt=3)

Attempt to correct an LLM response's malformed JSON string. The string is
parsed with `JSON3.read`; on failure, the parse error and `correctJsonExample`
are sent to the instruct-LLM service over MQTT, which proposes a corrected
string. Repeats up to `maxattempt` times.

# Arguments
- `config::T1`
    agent configuration dict (external services, MQTT broker info)
- `input::T2`
    the (possibly malformed) JSON string to correct
- `correctJsonExample::T3`
    example text showing the expected JSON structure

# Keywords
- `maxattempt::Integer=3`
    maximum number of parse/correct rounds

# Return
- `correctjson::String`
    a JSON string that parses successfully

# Throws
- `ErrorException` when no valid JSON is obtained after `maxattempt` rounds

# Signature
"""
function jsoncorrection(config::T1, input::T2, correctJsonExample::T3;
    maxattempt::Integer=3
    ) where {T1<:AbstractDict, T2<:AbstractString, T3<:AbstractString}

    # Strings are immutable; no need to deepcopy the input.
    incorrectjson = input

    for attempt in 1:maxattempt
        try
            JSON3.read(incorrectjson) # throws when the string is not valid JSON
            return incorrectjson
        catch e
            @warn "Attempting to correct JSON string. Attempt $attempt"
            e = """$e"""
            # Truncate verbose parser output after the first EOF marker.
            if occursin("EOF", e)
                e = split(e, "EOF")[1] * "EOF"
            end
            # Keep the CURRENT failing string in the prompt so the error message
            # and the shown JSON always refer to the same text (previously the
            # prompt was reset to the original input on every attempt).
            _prompt =
                """
                Your goal are:
                1) Use the expected JSON format as a guideline to check why the given JSON string failed to load and provide a corrected version that can be loaded by Python's json.load function.
                2) Provide Corrected JSON string only. Do not provide any other info.

                $correctJsonExample

                Let's begin!
                Given JSON string: $incorrectjson
                The given JSON string failed to load previously because: $e
                Corrected JSON string:
                """

            # Apply the LLM-specific instruct format.
            externalService = config[:externalservice][:text2textinstruct]
            llminfo = externalService[:llminfo]
            prompt =
                if llminfo[:name] == "llama3instruct"
                    formatLLMtext_llama3instruct("system", _prompt)
                else
                    error("llm model name is not defined yet $(@__LINE__)")
                end

            # Send the correction request using GeneralUtils.sendReceiveMqttMsg.
            msgMeta = GeneralUtils.generate_msgMeta(
                externalService[:mqtttopic],
                senderName= "jsoncorrection",
                senderId= string(uuid4()),
                receiverName= "text2textinstruct",
                mqttBroker= config[:mqttServerInfo][:broker],
                mqttBrokerPort= config[:mqttServerInfo][:port],
            )

            outgoingMsg = Dict(
                :msgMeta=> msgMeta,
                :payload=> Dict(
                    :text=> prompt,
                    :kwargs=> Dict(
                        :max_tokens=> 512,
                        :stop=> ["<|eot_id|>"],
                    )
                )
            )
            result = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120)
            incorrectjson = result[:response][:text]
        end
    end
    # Previously the function silently returned `nothing` here; fail loudly instead.
    error("jsoncorrection failed to produce a valid JSON string after $maxattempt attempts")
end
|
||
|
||
# [WORKING] check whether
|
||
# function isrecommend(state::T1, text2textInstructLLM::Function
|
||
# ) where {T1<:AbstractDict}
|
||
|
||
# systemmsg =
|
||
# """
|
||
# You are a helpful assistant that analyzes agent's trajectories to find solutions and observations (i.e., the results of actions) to answer the user's questions.
|
||
|
||
# Definitions:
|
||
# "question" is the user's question.
|
||
# "thought" is step-by-step reasoning about the current situation.
|
||
# "plan" is what to do to complete the task from the current situation.
|
||
# “action_name” is the name of the action taken, which can be one of the following functions:
|
||
# 1) CHATBOX[text], which you can use to talk with the user. "text" is in verbal English.
|
||
# 2) WINESTOCK[query], which you can use to find info about wine in your inventory. "query" is a search term in verbal English. The best query must includes "budget", "type of wine", "characteristics of wine" and "food pairing".
|
||
# "action_input" is the input to the action
|
||
# "observation" is result of the preceding immediate action.
|
||
|
||
# At each round of conversation, the user will give you:
|
||
# Context: ...
|
||
# Trajectories: ...
|
||
|
||
# You should then respond to the user with:
|
||
# 1) trajectory_evaluation:
|
||
# - Analyze the trajectories of a solution to answer the user's original question.
|
||
# Then given a question and a trajectory, evaluate its correctness and provide your reasoning and
|
||
# analysis in detail. Focus on the latest thought, action, and observation.
|
||
# Incomplete trajectories can be correct if the thoughts and actions so far are correct,
|
||
# even if the answer is not found yet. Do not generate additional thoughts or actions.
|
||
# 2) answer_evaluation: Focus only on the matter mentioned in the question and analyze how the latest observation addresses the question.
|
||
# 3) accepted_as_answer: Decide whether the latest observation's content answers the question. The possible responses are either 'Yes' or 'No.'
|
||
# Bad example (The observation didn't answers the question):
|
||
# question: Find cars with 4 wheels.
|
||
# observation: There are 2 cars in the table.
|
||
# Good example (The observation answers the question):
|
||
# question: Find cars with a stereo.
|
||
# observation: There are 1 cars in the table. 1) brand: Toyota, model: yaris, color: black.
|
||
# 4) score: Correctness score s where s is a single integer between 0 to 9.
|
||
# - 0 means the trajectories are incorrect.
|
||
# - 9 means the trajectories are correct, and the observation's content directly answers the question.
|
||
# 5) suggestion: if accepted_as_answer is "No", provide suggestion.
|
||
|
||
# You should only respond in format as described below:
|
||
# trajectory_evaluation: ...
|
||
# answer_evaluation: ...
|
||
# accepted_as_answer: ...
|
||
# score: ...
|
||
# suggestion: ...
|
||
|
||
# Let's begin!
|
||
# """
|
||
|
||
# thoughthistory = ""
|
||
# for (k, v) in state[:thoughtHistory]
|
||
# thoughthistory *= "$k: $v\n"
|
||
# end
|
||
|
||
# usermsg =
|
||
# """
|
||
# Context: None
|
||
# Trajectories: $thoughthistory
|
||
# """
|
||
|
||
# _prompt =
|
||
# [
|
||
# Dict(:name=> "system", :text=> systemmsg),
|
||
# Dict(:name=> "user", :text=> usermsg)
|
||
# ]
|
||
|
||
# # put in model format
|
||
# prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
|
||
# prompt *=
|
||
# """
|
||
# <|start_header_id|>assistant<|end_header_id|>
|
||
# """
|
||
|
||
# for attempt in 1:5
|
||
# try
|
||
# response = text2textInstructLLM(prompt)
|
||
# responsedict = GeneralUtils.textToDict(response,
|
||
# ["trajectory_evaluation", "answer_evaluation", "accepted_as_answer", "score", "suggestion"],
|
||
# rightmarker=":", symbolkey=true)
|
||
|
||
# # check if dict has all required value
|
||
# trajectoryevaluation_text::AbstractString = responsedict[:trajectory_evaluation]
|
||
# answerevaluation_text::AbstractString = responsedict[:answer_evaluation]
|
||
# responsedict[:score] = parse(Int, responsedict[:score]) # convert string "5" into integer 5
|
||
# score::Integer = responsedict[:score]
|
||
# accepted_as_answer::AbstractString = responsedict[:accepted_as_answer]
|
||
# suggestion::AbstractString = responsedict[:suggestion]
|
||
|
||
# # add to state here instead to in transition() because the latter causes julia extension crash (a bug in julia extension)
|
||
# state[:evaluation] = "$(responsedict[:trajectory_evaluation]) $(responsedict[:answer_evaluation])"
|
||
# state[:evaluationscore] = responsedict[:score]
|
||
# state[:accepted_as_answer] = responsedict[:accepted_as_answer]
|
||
# state[:suggestion] = responsedict[:suggestion]
|
||
|
||
# # mark as terminal state when the answer is achieved
|
||
# if accepted_as_answer == "Yes"
|
||
# state[:isterminal] = true
|
||
# state[:reward] = 1
|
||
# end
|
||
# println("--> 5 Evaluator ", @__FILE__, " ", @__LINE__)
|
||
# pprintln(Dict(responsedict))
|
||
# return responsedict[:score]
|
||
# catch e
|
||
# io = IOBuffer()
|
||
# showerror(io, e)
|
||
# errorMsg = String(take!(io))
|
||
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
|
||
# println("")
|
||
# println("Attempt $attempt. Error occurred: $errorMsg\n$st")
|
||
# println("")
|
||
# end
|
||
# end
|
||
# error("evaluator failed to generate an evaluation")
|
||
# end
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
end # module llmfunction |