1083 lines
38 KiB
Julia
1083 lines
38 KiB
Julia
module llmfunction
|
||
|
||
export virtualWineUserChatbox, jsoncorrection, checkinventory, # recommendbox,
|
||
virtualWineUserRecommendbox, userChatbox, userRecommendbox, extractWineAttributes_1,
|
||
extractWineAttributes_2, paraphrase
|
||
|
||
using HTTP, JSON3, URIs, Random, PrettyPrinting, UUIDs, Dates
|
||
using GeneralUtils, SQLLLM
|
||
using ..type, ..util
|
||
|
||
# ---------------------------------------------- 100 --------------------------------------------- #
|
||
|
||
|
||
""" Recommendation chatbox: send one turn to the virtual wine customer and return its scored reply.

# Arguments
- `a::T1`
    one of Yiem's agents; supplies `a.config` (external-service and MQTT settings) and `a.id`
- `input`
    text to be sent to the virtual wine customer

# Return
- `(text, select, reward, isterminal)`
    the customer's reply text, the option it selected (`nothing` when no option
    was chosen), its reward signal, and whether it considers the dialogue finished

# TODO
- [] add reccommend() to compare wine

# Signature
"""
function virtualWineUserRecommendbox(a::T1, input
    )::Union{Tuple{String, Number, Number, Bool}, Tuple{String, Nothing, Number, Bool}} where {T1<:agent}

    # put the outgoing text into the backend LLM's instruct format
    virtualWineCustomer = a.config[:externalservice][:virtualWineCustomer_1]
    llminfo = virtualWineCustomer[:llminfo]
    prompt =
        if llminfo[:name] == "llama3instruct"
            formatLLMtext_llama3instruct("assistant", input)
        else
            error("llm model name is not defined yet $(@__LINE__)")
        end

    # send formatted input to the virtual customer using GeneralUtils.sendReceiveMqttMsg
    msgMeta = GeneralUtils.generate_msgMeta(
        virtualWineCustomer[:mqtttopic],
        senderName= "virtualWineUserRecommendbox",
        senderId= a.id,
        receiverName= "virtualWineCustomer",
        mqttBroker= a.config[:mqttServerInfo][:broker],
        mqttBrokerPort= a.config[:mqttServerInfo][:port],
        msgId = "dummyid" #CHANGE remove after testing finished
    )

    outgoingMsg = Dict(
        :msgMeta=> msgMeta,
        :payload=> Dict(
            :text=> prompt,
        )
    )

    # blocking round-trip; raises on timeout inside sendReceiveMqttMsg
    result = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120)
    response = result[:response]

    # NOTE(review): assumes the service reply always carries :text/:select/:reward/:isterminal — confirm against the virtualWineCustomer service contract
    return (response[:text], response[:select], response[:reward], response[:isterminal])
end
|
||
|
||
|
||
|
||
""" Chatbox for one dialogue turn with the LLM-roleplayed virtual wine customer.

Builds a roleplay prompt (system persona + relabelled chat history + the
sommelier's latest `input`), sends it to the `text2textinstruct` service over
MQTT, and parses the customer's JSON reply. Retries up to 5 times when the
reply cannot be parsed or is empty.

# Arguments
- `config::T1`
    configuration dict; must contain `[:externalservice][:text2textinstruct]`
    and `[:mqttServerInfo]`
- `input::T2`
    the sommelier's text to be sent to the virtual wine customer
- `virtualCustomerChatHistory`
    vector of `Dict(:name, :text)` turns. NOTE(review): this function
    `pushfirst!`s the system message into the caller's vector — the mutation is
    visible to the caller; confirm that is intended.

# Return
- `(text, select, reward, isterminal)`
    the customer's reply text, selected option (or `nothing`), integer reward
    in {-1, 0, 1}, and whether the customer wants to end the conversation

# Signature
"""
function virtualWineUserChatbox(config::T1, input::T2, virtualCustomerChatHistory
    )::Union{Tuple{String, Number, Number, Bool}, Tuple{String, Nothing, Number, Bool}} where {T1<:AbstractDict, T2<:AbstractString}

    # persona + task description + JSON output contract for the roleplayed customer
    systemmsg =
        """
        You find yourself in a well-stocked wine store, engaged in a conversation with the store's knowledgeable sommelier.
        You're on a quest to find a bottle of wine that aligns with your specific preferences and requirements.

        The ideal wine you're seeking should meet the following criteria:
        1. It should fit within your budget.
        2. It should be suitable for the occasion you're planning.
        3. It should pair well with the food you intend to serve.
        4. It should be of a particular type of wine you prefer.
        5. It should possess certain characteristics, including:
        - The level of sweetness.
        - The intensity of its flavor.
        - The amount of tannin it contains.
        - Its acidity level.

        Here's the criteria details:
        {
        "budget": 50,
        "occasion": "graduation ceremony",
        "food pairing": "Thai food",
        "type of wine": "red",
        "wine sweetness level": "dry",
        "wine intensity level": "full-bodied",
        "wine tannin level": "low",
        "wine acidity level": "medium",
        }

        You should only respond with "text", "select", "reward", "isterminal" steps.
        "text" is your conversation.
        "select" is an integer. Choose an option when presented with choices, or leave it null if none of the options satisfy you or if no choices are available.
        "reward" is an integer, it can be three number:
        1) 1 if you find the right wine.
        2) 0 if you don’t find the ideal wine.
        3) -1 if you’re dissatisfied with the sommelier’s response.
        "isterminal" can be false if you still want to talk with the sommelier, true otherwise.

        You should only respond in JSON format as describe below:
        {
        "text": "your conversation",
        "select": null,
        "reward": 0,
        "isterminal": false
        }

        Here are some examples:

        sommelier: "What's your budget?
        you:
        {
        "text": "My budget is 30 USD.",
        "select": null,
        "reward": 0,
        "isterminal": false
        }

        sommelier: "The first option is Zena Crown and the second one is Buano Red."
        you:
        {
        "text": "I like the 2nd option.",
        "select": 2,
        "reward": 1,
        "isterminal": true
        }

        Let's begin!
        """

    pushfirst!(virtualCustomerChatHistory, Dict(:name=> "system", :text=> systemmsg))

    # replace the :user key in chathistory to allow the virtual wine customer AI roleplay
    # ("user" becomes the customer "you"; "assistant" becomes the "sommelier")
    chathistory::Vector{Dict{Symbol, Any}} = Vector{Dict{Symbol, Any}}()
    for i in virtualCustomerChatHistory
        newdict = Dict()
        newdict[:name] =
            if i[:name] == "user"
                "you"
            elseif i[:name] == "assistant"
                "sommelier"
            else
                i[:name]
            end

        newdict[:text] = i[:text]
        push!(chathistory, newdict)
    end

    # the sommelier's newest utterance is the last turn the customer must answer
    push!(chathistory, Dict(:name=> "assistant", :text=> input))

    # put in model format and pre-seed the reply with '{"text' to coerce JSON output
    prompt = formatLLMtext(chathistory, "llama3instruct")
    prompt *=
        """
        <|start_header_id|>you<|end_header_id|>
        {"text"
        """

    pprint(prompt)
    externalService = config[:externalservice][:text2textinstruct]

    # send formatted input to user using GeneralUtils.sendReceiveMqttMsg
    msgMeta = GeneralUtils.generate_msgMeta(
        externalService[:mqtttopic],
        senderName= "virtualWineUserChatbox",
        senderId= string(uuid4()),
        receiverName= "text2textinstruct",
        mqttBroker= config[:mqttServerInfo][:broker],
        mqttBrokerPort= config[:mqttServerInfo][:port],
        msgId = string(uuid4()) #CHANGE remove after testing finished
    )

    outgoingMsg = Dict(
        :msgMeta=> msgMeta,
        :payload=> Dict(
            :text=> prompt,
        )
    )

    # retry loop: malformed JSON / empty text raises and we try again, up to 5 times
    for attempt in 1:5
        try
            response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120)
            _responseJsonStr = response[:response][:text]
            expectedJsonExample =
                """
                Here is an expected JSON format:
                {
                "text": "...",
                "select": "...",
                "reward": "...",
                "isterminal": "..."
                }
                """
            # let the correction LLM repair near-JSON before parsing
            responseJsonStr = jsoncorrection(config, _responseJsonStr, expectedJsonExample)
            responseDict = copy(JSON3.read(responseJsonStr))

            text::AbstractString = responseDict[:text]
            # the model sometimes emits the literal string "null" instead of JSON null
            select::Union{Nothing, Number} = responseDict[:select] == "null" ? nothing : responseDict[:select]
            reward::Number = responseDict[:reward]
            isterminal::Bool = responseDict[:isterminal]

            if text != ""
                # pass test
            else
                error("virtual customer not answer correctly")
            end

            return (text, select, reward, isterminal)
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("")
            @warn "Error occurred: $errorMsg\n$st"
            println("")
        end
    end
    error("virtualWineUserChatbox failed to get a response")
end
|
||
|
||
""" Search the retailer's wine inventory for wines matching a free-text order.

Extracts structured attributes from `input` (two complementary LLM passes),
assembles a natural-language retrieval query scoped to the agent's retailer,
and runs it through the SQL-LLM pipeline.

# Arguments
- `a::T1`
    one of ChatAgent's agents; supplies `a.retailername` and the callables in `a.func`
- `input::T2`
    the customer's free-text wine request

# Return
NamedTuple `(result, rawresponse, success, errormsg)` where `result` is the
textual list of matching wines and `rawresponse` is the raw SQLLLM reply.

# Signature
"""
function checkinventory(a::T1, input::T2
    ) where {T1<:agent, T2<:AbstractString}

    println("\ncheckinventory order: $input ", @__FILE__, ":", @__LINE__, " $(Dates.now())")

    # two extraction passes: identity attributes (winery, grape, price, ...)
    # and sensory attributes (sweetness, tannin, acidity, intensity)
    attrs1 = extractWineAttributes_1(a, input)
    attrs2 = extractWineAttributes_2(a, input)

    criteria = "retailer name: $(a.retailername), $attrs1, $attrs2"
    searchquery = "Retrieves winery, wine_name, wine_id, vintage, region, country, wine_type, grape, serving_temperature, sweetness, intensity, tannin, acidity, tasting_notes, price and currency of wines that match the following criteria - {$criteria}"
    println("\ncheckinventory input: $searchquery ", @__FILE__, ":", @__LINE__, " $(Dates.now())")

    # add suppport for similarSQLVectorDB
    textresult, rawresponse = SQLLLM.query(searchquery, a.func[:executeSQL],
        a.func[:text2textInstructLLM];
        insertSQLVectorDB=a.func[:insertSQLVectorDB],
        similarSQLVectorDB=a.func[:similarSQLVectorDB],
        llmFormatName="qwen3")

    println("\ncheckinventory result ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
    println(textresult)

    return (result=textresult, rawresponse=rawresponse, success=true, errormsg=nothing)
end
|
||
|
||
|
||
""" Extract identity attributes of a wine (winery, name, vintage, region, country, type, grape, price range, occasion, food pairing) from a free-text query.

Prompts the agent's instruct LLM, validates the structured answer, and retries
up to `maxattempt` times when the response is malformed or hallucinated.

# Arguments
- `a::T1`
    one of Yiem's agents; supplies `a.func[:text2textInstructLLM]` and `a.id`
- `input::T2`
    the user's free-text wine query
- `maxattempt=10`
    maximum number of LLM retry rounds before giving up

# Return
- `result::String`
    comma-separated `key: value` pairs, omitting attributes the LLM marked
    "NA"/"none" (may be empty when nothing was extracted)

# Signature
"""
function extractWineAttributes_1(a::T1, input::T2; maxattempt=10
    )::String where {T1<:agent, T2<:AbstractString}

    systemmsg =
        """
        As a helpful sommelier, your task is to extract the user information from the user's query as much as possible to fill out user's preference form.

        At each round of conversation, the user will give you the following:
        User's query: ...

        You must follow the following guidelines:
        - If specific information required in the preference form is not available in the query or there isn't any, mark with "NA" to indicate this.
        Additionally, words like 'any' or 'unlimited' mean no information is available.
        - Do not generate other comments.

        You should then respond to the user with:
        Thought: state your understanding of the current situation
        Wine_name: name of the wine
        Winery: name of the winery
        Vintage: the year of the wine
        Region: a region (NOT a country) where the wine is produced, such as Burgundy, Napa Valley, etc
        Country: a country where the wine is produced. Can be "Austria", "Australia", "France", "Germany", "Italy", "Portugal", "Spain", "United States"
        Wine_type: can be one of: "red", "white", "sparkling", "rose", "dessert" or "fortified"
        Grape_varietal: the name of the primary grape used to make the wine
        Tasting_notes: a brief description of the wine's taste, such as "butter", "oak", "fruity", etc
        Wine_price: price range of wine.
        Occasion: the occasion the user is having the wine for
        Food_to_be_paired_with_wine: food that the user will be served with the wine such as poultry, fish, steak, etc

        You should only respond in format as described below:
        Thought: ...
        Wine_name: ...
        Winery: ...
        Vintage: ...
        Region: ...
        Country: ...
        Wine_type:
        Grape_varietal: ...
        Tasting_notes: ...
        Wine_price: ...
        Occasion: ...
        Food_to_be_paired_with_wine: ...

        Here are some example:
        User's query: red, Chenin Blanc, Riesling, 20 USD
        {"reasoning": ..., "winery": "NA", "wine_name": "NA", "vintage": "NA", "region": "NA", "country": "NA", "wine_type": "red, white", "grape_varietal": "Chenin Blanc, Riesling", "tasting_notes": "NA", "wine_price": "0-20", "occasion": "NA", "food_to_be_paired_with_wine": "NA"}

        User's query: Domaine du Collier Saumur Blanc 2019, France, white, Merlot
        {"reasoning": ..., "winery": "Domaine du Collier", "wine_name": "Saumur Blanc", "vintage": "2019", "region": "Saumur", "country": "France", "wine_type": "white", "grape_varietal": "Merlot", "tasting_notes": "NA", "wine_price": "NA", "occasion": "NA", "food_to_be_paired_with_wine": "NA"}

        Let's begin!
        """

    # expected "Key:" markers in the LLM response and the dict keys they map to
    header = ["Thought:", "Wine_name:", "Winery:", "Vintage:", "Region:", "Country:", "Wine_type:", "Grape_varietal:", "Tasting_notes:", "Wine_price:", "Occasion:", "Food_to_be_paired_with_wine:"]
    dictkey = ["thought", "wine_name", "winery", "vintage", "region", "country", "wine_type", "grape_varietal", "tasting_notes", "wine_price", "occasion", "food_to_be_paired_with_wine"]
    # feedback appended to the next attempt's prompt; starts as a harmless placeholder
    errornote = "N/A"

    llmkwargs=Dict(
        :num_ctx => 32768,
        :temperature => 0.5,
    )

    for attempt in 1:maxattempt
        #[PENDING] I should add generatequestion()

        if attempt > 1
            println("\nYiemAgent extractWineAttributes_1() attempt $attempt/$maxattempt ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
        end

        usermsg =
            """
            User's query: $input
            P.S. $errornote
            """

        _prompt =
            [
                Dict(:name=> "system", :text=> systemmsg),
                Dict(:name=> "user", :text=> usermsg)
            ]

        # put in model format
        prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
        response = a.func[:text2textInstructLLM](prompt;
            modelsize="medium", llmkwargs=llmkwargs, senderId=a.id)
        response = GeneralUtils.remove_french_accents(response)
        response = GeneralUtils.deFormatLLMtext(response, "granite3")

        # check wheter all attributes are in the response; retry when any is missing
        checkFlag = false
        for word in header
            if !occursin(word, response)
                errornote = "In your previous attempts, the $word attribute is missing. Please try again."
                println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
                checkFlag = true
                break
            end
        end
        checkFlag == true ? continue : nothing

        # check whether response has all answer's key points, exactly once each
        detected_kw = GeneralUtils.detect_keyword(header, response)
        if 0 ∈ values(detected_kw)
            errornote = "In your previous attempts, the response does not have all answer's key points"
            println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
            continue
        elseif sum(values(detected_kw)) > length(header)
            errornote = "In your previous attempts, the response has duplicated answer's key points"
            println("\nYiemAgent extractWineAttributes_1() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
            continue
        end
        responsedict = GeneralUtils.textToDict(response, header;
            dictKey=dictkey, symbolkey=true)

        # drop free-text fields that are not validated against the input below
        delete!(responsedict, :thought)
        delete!(responsedict, :tasting_notes)
        delete!(responsedict, :occasion)
        delete!(responsedict, :food_to_be_paired_with_wine)

        println(@__FILE__, " ", @__LINE__)
        pprintln(responsedict)

        # check if winery, wine_name, region, country, wine_type, grape_varietal's value are in the query because sometime AI halucinates
        checkFlag = false
        for i in dictkey
            j = Symbol(i)
            if j ∉ [:thought, :tasting_notes, :occasion, :food_to_be_paired_with_wine]
                # in case j is wine_price it needs to be checked differently because its value is ranged
                if j == :wine_price
                    if responsedict[:wine_price] != "NA"
                        # check whether wine_price is in ranged number
                        if !occursin('-', responsedict[:wine_price])
                            errornote = "In your previous attempt, the 'wine_price' was not set to a ranged number. Please adjust it accordingly."
                            println("\nERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
                            checkFlag = true
                            break
                        end

                        # check whether max wine_price is in the input; if not, the price was hallucinated — discard it
                        pricerange = split(responsedict[:wine_price], '-')
                        minprice = pricerange[1]
                        maxprice = pricerange[end]
                        if !occursin(maxprice, input)
                            responsedict[:wine_price] = "NA"
                        end
                        # price range like 100-100 is not good
                        if minprice == maxprice
                            errornote = "In your previous attempt, you inputted 'wine_price' with a 'minimum' value equaling the 'maximum', which is not valid."
                            println("\nERROR YiemAgent extractWineAttributes_1() $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
                            checkFlag = true
                            break
                        end
                    end
                else
                    content = responsedict[j]
                    if typeof(content) <: AbstractVector
                        content = strip.(content)
                    elseif occursin(',', content)
                        content = split(content, ",") # sometime AI generates multiple values e.g. "Chenin Blanc, Riesling"
                        content = strip.(content)
                    else
                        content = [content]
                    end
                end
            end
        end
        checkFlag == true ? continue : nothing # skip the rest code if true

        # remove (some text) — LLMs sometimes append parenthesised commentary to values
        for (k, v) in responsedict
            _v = replace(v, r"\(.*?\)" => "")
            responsedict[k] = _v
        end

        # assemble "key: value, " pairs, skipping empty / NA / none entries
        result = ""
        for (k, v) in responsedict
            if !occursin("NA", v) && v != "" && !occursin("none", v) && !occursin("None", v)
                result *= "$k: $v, "
            end
        end

        #[PENDING] remove halucination. "highend dry white wine" --> "wine_type: white, occasion: special occasion, food_to_be_paired_with_wine: seafood, fish, country: France, Italy, USA, grape_varietal: Chardonnay, Sauvignon Blanc, Pinot Grigio\nwine_notes: citrus, green apple, floral"

        result = result[1:end-2] # remove the ending ", "

        return result
    end
    error("extractWineAttributes_1() failed to get a response")
end
|
||
|
||
""" Extract sensory attributes (sweetness, acidity, tannin, intensity) from a free-text wine query as integer ranges.

Prompts the agent's instruct LLM with a word-to-number conversion table,
validates the answer (keywords must appear in the input; levels must be
`min-max` ranges), and retries up to 10 times on malformed output.

# Arguments
- `a::T1`
    one of Yiem's agents; supplies `a.func[:text2textInstructLLM]`
- `input::T2`
    the user's free-text wine query

# Return
- `result::String`
    comma-separated `key: value` pairs for attributes that were found ("NA"
    entries omitted; may be empty)

# TODO
- [PENDING] "French dry white wines with medium bod" the LLM does not recognize sweetness. use LLM self questioning to solve.
- [PENDING] French Syrah, Viognier, under 100. LLM extract intensiry of 3-5. why?
"""
function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<:AbstractString}

    conversiontable =
        """
        <Conversion Table>
        Intensity level:
        1 to 2: May correspond to "light-bodied" or a similar description.
        2 to 3: May correspond to "med light bodied", "medium light" or a similar description.
        3 to 4: May correspond to "medium bodied" or a similar description.
        4 to 5: May correspond to "med full bodied", "medium full" or a similar description.
        4 to 5: May correspond to "full bodied" or a similar description.
        Sweetness level:
        1 to 2: May correspond to "dry", "no sweet" or a similar description.
        2 to 3: May correspond to "off dry", "less sweet" or a similar description.
        3 to 4: May correspond to "semi sweet" or a similar description.
        4 to 5: May correspond to "sweet" or a similar description.
        4 to 5: May correspond to "very sweet" or a similar description.
        Tannin level:
        1 to 2: May correspond to "low tannin" or a similar description.
        2 to 3: May correspond to "semi low tannin" or a similar description.
        3 to 4: May correspond to "medium tannin" or a similar description.
        4 to 5: May correspond to "semi high tannin" or a similar description.
        4 to 5: May correspond to "high tannin" or a similar description.
        Acidity level:
        1 to 2: May correspond to "low acidity" or a similar description.
        2 to 3: May correspond to "semi low acidity" or a similar description.
        3 to 4: May correspond to "medium acidity" or a similar description.
        4 to 5: May correspond to "semi high acidity" or a similar description.
        4 to 5: May correspond to "high acidity" or a similar description.
        </Conversion Table>
        """

    systemmsg =
        """
        As an helpful sommelier, your task is to fill out the user's preference form based on the corresponding words from the user's query.

        At each round of conversation, the user will give you the current situation:
        Conversion Table: ...
        User's query: ...

        The preference form requires the following information:
        sweetness, acidity, tannin, intensity

        You must follow the following guidelines:
        1) If specific information required in the preference form is not available in the query or there isn't any, mark with 'NA' to indicate this.
        Additionally, words like 'any' or 'unlimited' mean no information is available.
        2) Use the conversion table to convert the descriptive word level of sweetness, intensity, tannin, and acidity into a corresponding integer.
        3) Do not generate other comments.
        You should then respond to the user with:
        Sweetness_keyword: The exact keywords in the user's query describing the sweetness level of the wine.
        Sweetness: ( S ), where ( S ) represents integers indicating the range of sweetness levels. Example: 1-2
        Acidity_keyword: The exact keywords in the user's query describing the acidity level of the wine.
        Acidity: ( A ), where ( A ) represents integers indicating the range of acidity level. Example: 3-5
        Tannin_keyword: The exact keywords in the user's query describing the tannin level of the wine.
        Tannin: ( T ), where ( T ) represents integers indicating the range of tannin level. Example: 1-3
        Intensity_keyword: The exact keywords in the user's query describing the intensity level of the wine.
        Intensity: ( I ), where ( I ) represents integers indicating the range of intensity level. Example: 2-4
        You should only respond in format as described below:
        Sweetness_keyword: ...
        Sweetness: ...
        Acidity_keyword: ...
        Acidity: ...
        Tannin_keyword: ...
        Tannin: ...
        Intensity_keyword: ...
        Intensity: ...

        Here are some examples:
        User's query: I want a wine with a medium-bodied, low acidity, medium tannin.
        Sweetness_keyword: NA
        Sweetness: NA
        Acidity_keyword: low acidity
        Acidity: 1-2
        Tannin_keyword: medium tannin
        Tannin: 3-4
        Intensity_keyword: medium-bodied
        Intensity: 3-4

        User's query: German red wine, under 100, pairs with spicy food
        Sweetness_keyword: NA
        Sweetness: NA
        Acidity_keyword: NA
        Acidity: NA
        Tannin_keyword: NA
        Tannin: NA
        Intensity_keyword: NA
        Intensity: NA

        Let's begin!
        """
    # expected "Key:" markers in the LLM response and the dict keys they map to
    header = ["Sweetness_keyword:", "Sweetness:", "Acidity_keyword:", "Acidity:", "Tannin_keyword:", "Tannin:", "Intensity_keyword:", "Intensity:"]
    dictkey = ["sweetness_keyword", "sweetness", "acidity_keyword", "acidity", "tannin_keyword", "tannin", "intensity_keyword", "intensity"]
    # feedback appended to the next attempt's prompt
    errornote = ""

    for attempt in 1:10
        usermsg =
            """
            $conversiontable
            User's query: $input
            P.S. $errornote
            """

        _prompt =
            [
                Dict(:name=> "system", :text=> systemmsg),
                Dict(:name=> "user", :text=> usermsg)
            ]

        # put in model format
        prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")

        response = a.func[:text2textInstructLLM](prompt)
        response = GeneralUtils.deFormatLLMtext(response, "granite3")

        # check whether response has all answer's key points, exactly once each
        detected_kw = GeneralUtils.detect_keyword(header, response)
        if 0 ∈ values(detected_kw)
            errornote = "In your previous attempt does not have all answer's key points"
            println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
            continue
        elseif sum(values(detected_kw)) > length(header)
            errornote = "In your previous attempt has duplicated answer's key points"
            println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
            continue
        end

        responsedict = GeneralUtils.textToDict(response, header;
            dictKey=dictkey, symbolkey=true)

        # check whether each describing keyword is in the input to prevent halucination.
        # BUGFIX: a bare `continue` here only advanced this inner loop, so a
        # hallucinated response fell through and was used anyway; use a flag +
        # break so the *attempt* loop retries instead.
        checkFlag = false
        for i in ["sweetness", "acidity", "tannin", "intensity"]
            keyword = Symbol(i * "_keyword") # e.g. sweetness_keyword
            value = responsedict[keyword]
            if value != "NA" && !occursin(value, input)
                errornote = "In your previous attempt, keyword $keyword: $value does not appear in the input. You must use information from the input only"
                println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
                checkFlag = true
                break
            end

            # if value == "NA" then responsedict[i] = "NA"
            # e.g. if sweetness_keyword == "NA" then sweetness = "NA"
            if value == "NA"
                responsedict[Symbol(i)] = "NA"
            end
        end
        checkFlag == true ? continue : nothing # retry the attempt on hallucination

        # some time LLM not put integer range.
        # BUGFIX: same inner-`continue` problem as above — flag + break so the
        # attempt loop retries when a level value is not a "min-max" range.
        checkFlag = false
        for (k, v) in responsedict
            if !occursin("keyword", string(k))
                if v !== "NA" && (!occursin('-', v) || length(v) > 5)
                    errornote = "WARNING: The non-range value {$k: $v} is not allowed. It should be specified in a range format, i.e. min-max."
                    println("\nERROR YiemAgent extractWineAttributes_2() Attempt $attempt $errornote ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
                    checkFlag = true
                    break
                end
            end
        end
        checkFlag == true ? continue : nothing

        # some time LLM says NA-2. Need to convert NA to 1
        for (k, v) in responsedict
            if occursin("NA", v) && occursin("-", v)
                new_v = replace(v, "NA"=>"1")
                responsedict[k] = new_v
            end
        end

        # assemble "key: value, " pairs, skipping NA entries
        result = ""
        for (k, v) in responsedict
            if !occursin("NA", v)
                result *= "$k: $v, "
            end
        end
        result = result[1:end-2] # remove the ending ", "

        return result
    end
    error("extractWineAttributes_2() failed to get a response")
end
|
||
|
||
|
||
""" Paraphrase `text` via the given instruct LLM, retrying up to 10 times.

# Arguments
- `text2textInstructLLM::Function`
    callable that takes a formatted prompt string and returns the LLM's raw reply
- `text::String`
    the text to paraphrase

# Return
The paraphrased text (value of the `Paraphrase:` field in the LLM response).
Raises when no valid response is obtained after 10 attempts.

# Signature
"""
function paraphrase(text2textInstructLLM::Function, text::String)
    systemmsg =
        """
        Your name: N/A
        Your vision:
        - You are a helpful assistant who help the user to paraphrase their text.
        Your mission:
        - To help paraphrase the user's text
        Mission's objective includes:
        - To help paraphrase the user's text
        Your responsibility includes:
        1) To help paraphrase the user's text
        Your responsibility does NOT includes:
        1) N/A
        Your profile:
        - N/A
        Additional information:
        - N/A

        At each round of conversation, you will be given the following information:
        Text: The user's given text

        You MUST follow the following guidelines:
        - N/A

        You should follow the following guidelines:
        - N/A

        You should then respond to the user with:
        Paraphrase: Paraphrased text

        You should only respond in format as described below:
        Paraphrase: ...

        Let's begin!
        """

    # expected "Key:" marker in the LLM response and the dict key it maps to
    header = ["Paraphrase:"]
    dictkey = ["paraphrase"]

    # feedback appended to the next attempt's prompt
    errornote = ""
    response = nothing # placeholder for show when error msg show up

    for attempt in 1:10
        usermsg = """
            Text: $text
            P.S. $errornote
            """

        _prompt =
            [
                Dict(:name => "system", :text => systemmsg),
                Dict(:name => "user", :text => usermsg)
            ]

        # put in model format
        prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")

        try
            response = text2textInstructLLM(prompt)
            response = GeneralUtils.deFormatLLMtext(response, "granite3")
            # sometime the model response like this "here's how I would respond: ..."
            if occursin("respond:", response)
                errornote = "You don't need to intro your response"
                error("\nparaphrase() response contain : ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
            end
            # normalise the reply: strip accents and characters that break
            # downstream formatting ('*' markdown, '$' interpolation, backticks)
            response = GeneralUtils.remove_french_accents(response)
            response = replace(response, '*'=>"")
            response = replace(response, '$' => "USD")
            response = replace(response, '`' => "")

            # check whether response has all answer's key points, exactly once each
            detected_kw = GeneralUtils.detect_keyword(header, response)
            if 0 ∈ values(detected_kw)
                errornote = "\nYiemAgent paraphrase() response does not have all answer's key points"
                continue
            elseif sum(values(detected_kw)) > length(header)
                errornote = "\nYiemAgent paraphrase() response has duplicated answer's key points"
                continue
            end

            responsedict = GeneralUtils.textToDict(response, header;
                dictKey=dictkey, symbolkey=true)

            for i ∈ [:paraphrase]
                if length(JSON3.write(responsedict[i])) == 0
                    error("$i is empty ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
                end
            end

            # check if there are more than 1 key per categories
            for i ∈ [:paraphrase]
                matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
                if length(matchkeys) > 1
                    error("paraphrase() has more than one key per categories")
                end
            end

            println("\nparaphrase() ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
            pprintln(Dict(responsedict))

            result = responsedict[:paraphrase]

            return result
        catch e
            io = IOBuffer()
            showerror(io, e)
            errorMsg = String(take!(io))
            st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
            println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
        end
    end
    error("paraphrase() failed to generate a response")
end
|
||
|
||
|
||
|
||
""" Attempt to correct an LLM response's malformed JSON string.

Tries to parse `input`; when parsing fails, asks the `text2textinstruct`
service (over MQTT) to repair the string, guided by `correctJsonExample`, and
re-parses. Repeats up to `maxattempt` times.

# Arguments
- `config::T1`
    configuration dict; must contain `[:externalservice][:text2textinstruct]`
    and `[:mqttServerInfo]`
- `input::T2`
    the (possibly malformed) JSON string to correct
- `correctJsonExample::T3`
    an example of the expected JSON structure, embedded into the repair prompt
- `maxattempt::Integer=3`
    maximum number of parse-then-repair rounds

# Return
- `correctjson::String`
    a JSON string that parses successfully. Raises when every attempt fails.

# Signature
"""
function jsoncorrection(config::T1, input::T2, correctJsonExample::T3;
    maxattempt::Integer=3
    ) where {T1<:AbstractDict, T2<:AbstractString, T3<:AbstractString}

    # candidate string for the current round; starts as the caller's input
    # (strings are immutable, so no defensive copy is needed)
    incorrectjson = input
    correctjson = nothing

    for attempt in 1:maxattempt
        try
            # parse purely to validate; a successful read means the candidate is good
            d = copy(JSON3.read(incorrectjson))
            correctjson = incorrectjson
            return correctjson
        catch e
            @warn "Attempting to correct JSON string. Attempt $attempt"
            e = """$e"""
            # truncate verbose parser errors at the first EOF marker to keep the prompt short
            if occursin("EOF", e)
                e = split(e, "EOF")[1] * "EOF"
            end
            # always show the caller's original string to the repair LLM
            incorrectjson = input
            _prompt =
                """
                Your goal are:
                1) Use the expected JSON format as a guideline to check why the given JSON string failed to load and provide a corrected version that can be loaded by Python's json.load function.
                2) Provide Corrected JSON string only. Do not provide any other info.

                $correctJsonExample

                Let's begin!
                Given JSON string: $incorrectjson
                The given JSON string failed to load previously because: $e
                Corrected JSON string:
                """

            # apply LLM specific instruct format
            externalService = config[:externalservice][:text2textinstruct]
            llminfo = externalService[:llminfo]
            prompt =
                if llminfo[:name] == "llama3instruct"
                    formatLLMtext_llama3instruct("system", _prompt)
                else
                    error("llm model name is not defined yet $(@__LINE__)")
                end

            # send formatted input to user using GeneralUtils.sendReceiveMqttMsg
            msgMeta = GeneralUtils.generate_msgMeta(
                externalService[:mqtttopic],
                senderName= "jsoncorrection",
                senderId= string(uuid4()),
                receiverName= "text2textinstruct",
                mqttBroker= config[:mqttServerInfo][:broker],
                mqttBrokerPort= config[:mqttServerInfo][:port],
            )

            outgoingMsg = Dict(
                :msgMeta=> msgMeta,
                :payload=> Dict(
                    :text=> prompt,
                    :kwargs=> Dict(
                        :max_tokens=> 512,
                        :stop=> ["<|eot_id|>"],
                    )
                )
            )
            result = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120)
            # the LLM's repaired string becomes the candidate for the next round
            incorrectjson = result[:response][:text]
        end
    end
    # previously fell off the loop returning `nothing`, which made the caller
    # crash obscurely in JSON3.read; fail loudly like the sibling functions do
    error("jsoncorrection() failed to produce a parsable JSON string after $maxattempt attempts")
end
|
||
|
||
|
||
# function isrecommend(state::T1, text2textInstructLLM::Function
|
||
# ) where {T1<:AbstractDict}
|
||
|
||
# systemmsg =
|
||
# """
|
||
# You are a helpful assistant that analyzes agent's trajectories to find solutions and observations (i.e., the results of actions) to answer the user's questions.
|
||
|
||
# Definitions:
|
||
# "question" is the user's question.
|
||
# "thought" is step-by-step reasoning about the current situation.
|
||
# "plan" is what to do to complete the task from the current situation.
|
||
# “action_name” is the name of the action taken, which can be one of the following functions:
|
||
# 1) CHATBOX[text], which you can use to talk with the user. "text" is in verbal English.
|
||
# 2) WINESTOCK[query], which you can use to find info about wine in your inventory. "query" is a search term in verbal English. The best query must includes "budget", "type of wine", "characteristics of wine" and "food pairing".
|
||
# "action_input" is the input to the action
|
||
# "observation" is result of the preceding immediate action.
|
||
|
||
# At each round of conversation, the user will give you:
|
||
# Context: ...
|
||
# Trajectories: ...
|
||
|
||
# You should then respond to the user with:
|
||
# 1) trajectory_evaluation:
|
||
# - Analyze the trajectories of a solution to answer the user's original question.
|
||
# Then given a question and a trajectory, evaluate its correctness and provide your reasoning and
|
||
# analysis in detail. Focus on the latest thought, action, and observation.
|
||
# Incomplete trajectories can be correct if the thoughts and actions so far are correct,
|
||
# even if the answer is not found yet. Do not generate additional thoughts or actions.
|
||
# 2) answer_evaluation: Focus only on the matter mentioned in the question and analyze how the latest observation addresses the question.
|
||
# 3) accepted_as_answer: Decide whether the latest observation's content answers the question. The possible responses are either 'Yes' or 'No.'
|
||
# Bad example (The observation didn't answers the question):
|
||
# question: Find cars with 4 wheels.
|
||
# observation: There are 2 cars in the table.
|
||
# Good example (The observation answers the question):
|
||
# question: Find cars with a stereo.
|
||
# observation: There are 1 cars in the table. 1) brand: Toyota, model: yaris, color: black.
|
||
# 4) score: Correctness score s where s is a single integer between 0 to 9.
|
||
# - 0 means the trajectories are incorrect.
|
||
# - 9 means the trajectories are correct, and the observation's content directly answers the question.
|
||
# 5) suggestion: if accepted_as_answer is "No", provide suggestion.
|
||
|
||
# You should only respond in format as described below:
|
||
# trajectory_evaluation: ...
|
||
# answer_evaluation: ...
|
||
# accepted_as_answer: ...
|
||
# score: ...
|
||
# suggestion: ...
|
||
|
||
# Let's begin!
|
||
# """
|
||
|
||
# thoughthistory = ""
|
||
# for (k, v) in state[:thoughtHistory]
|
||
# thoughthistory *= "$k: $v\n"
|
||
# end
|
||
|
||
# usermsg =
|
||
# """
|
||
# Context: None
|
||
# Trajectories: $thoughthistory
|
||
# """
|
||
|
||
# _prompt =
|
||
# [
|
||
# Dict(:name=> "system", :text=> systemmsg),
|
||
# Dict(:name=> "user", :text=> usermsg)
|
||
# ]
|
||
|
||
# # put in model format
|
||
# prompt = GeneralUtils.formatLLMtext(_prompt, "granite3")
|
||
# prompt *=
|
||
# """
|
||
# <|start_header_id|>assistant<|end_header_id|>
|
||
# """
|
||
|
||
# for attempt in 1:5
|
||
# try
|
||
# response = text2textInstructLLM(prompt)
|
||
# responsedict = GeneralUtils.textToDict(response,
|
||
# ["trajectory_evaluation", "answer_evaluation", "accepted_as_answer", "score", "suggestion"],
|
||
# rightmarker=":", symbolkey=true)
|
||
|
||
# # check if dict has all required value
|
||
# trajectoryevaluation_text::AbstractString = responsedict[:trajectory_evaluation]
|
||
# answerevaluation_text::AbstractString = responsedict[:answer_evaluation]
|
||
# responsedict[:score] = parse(Int, responsedict[:score]) # convert string "5" into integer 5
|
||
# score::Integer = responsedict[:score]
|
||
# accepted_as_answer::AbstractString = responsedict[:accepted_as_answer]
|
||
# suggestion::AbstractString = responsedict[:suggestion]
|
||
|
||
# # add to state here instead to in transition() because the latter causes julia extension crash (a bug in julia extension)
|
||
# state[:evaluation] = "$(responsedict[:trajectory_evaluation]) $(responsedict[:answer_evaluation])"
|
||
# state[:evaluationscore] = responsedict[:score]
|
||
# state[:accepted_as_answer] = responsedict[:accepted_as_answer]
|
||
# state[:suggestion] = responsedict[:suggestion]
|
||
|
||
# # mark as terminal state when the answer is achieved
|
||
# if accepted_as_answer == "Yes"
|
||
# state[:isterminal] = true
|
||
# state[:reward] = 1
|
||
# end
|
||
# println("--> 5 Evaluator ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
|
||
# pprintln(Dict(responsedict))
|
||
# return responsedict[:score]
|
||
# catch e
|
||
# io = IOBuffer()
|
||
# showerror(io, e)
|
||
# errorMsg = String(take!(io))
|
||
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
|
||
# println("")
|
||
# println("Attempt $attempt. Error occurred: $errorMsg\n$st")
|
||
# println("")
|
||
# end
|
||
# end
|
||
# error("evaluator failed to generate an evaluation")
|
||
# end
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
|
||
end # module llmfunction |