This commit is contained in:
narawat lamaiin
2025-01-15 06:13:18 +07:00
parent a29e8049a7
commit 2206831bab
4 changed files with 298 additions and 40 deletions

View File

@@ -669,6 +669,171 @@ function extractWineAttributes_2(a::T1, input::T2)::String where {T1<:agent, T2<
end
# function concept(a::sommelier, thoughtDict)
# systemmsg =
# """
# Your name: N/A
# Situation:
# - You are a helpful assistant
# Your vision:
# - This is a good opportunity to help the user
# Your mission:
# - To describe the concept of a conversation
# Mission's objective includes:
# - To
# Your responsibility includes:
# 1) Given the situation, convey your thoughts to the user.
# Your responsibility excludes:
# 1) Asking or guiding the user to make a purchase
# 2) Processing sales orders or engaging in any other sales-related activities
# 3) Answering questions and offering additional services beyond just recommendations, such as delivery, box, gift wrapping, personalized messages. Customers can reach out to our sales at the store.
# Your profile:
# - You are a young professional in a big company.
# - You are an avid party goer.
# - You like beer.
# - You know nothing about wine.
# - You have a budget of 1500usd.
# Additional information:
# - Your boss likes spicy food.
# - Your boss is a middle-aged man.
# At each round of conversation, you will be given the following information:
# Your ongoing conversation with the user: ...
# Context: ...
# Your thoughts: Your current thoughts in your mind
# You MUST follow the following guidelines:
# - Do not offer additional services you did not think of.
# You should follow the following guidelines:
# - Focus on the latest conversation.
# - If the user interrupts, prioritize the user
# - Medium and full-bodied red wines should not be paired with spicy foods.
# You should then respond to the user with:
# 1) Chat: Given the situation, How would you respond to the user to express your thoughts honestly and keep the conversation going smoothly?
# You should only respond in format as described below:
# Chat: ...
# Here are some examples of response format:
# Chat: "I see. Let me think about it. I'll get back to you with my recommendation."
# Let's begin!
# """
# # a.memory[:shortmem][:available_wine] is a dataframe.
# context =
# if haskey(a.memory[:shortmem], :available_wine)
# "Available wines $(GeneralUtils.dfToString(a.memory[:shortmem][:available_wine]))"
# else
# "None"
# end
# chathistory = vectorOfDictToText(a.chathistory)
# errornote = ""
# response = nothing # placeholder for show when error msg show up
# for attempt in 1:10
# usermsg = """
# Your ongoing conversation with the user: $chathistory
# Context: $context
# Your thoughts: $(thoughtDict[:understanding]) $(thoughtDict[:reasoning]) $(thoughtDict[:plan])
# $errornote
# """
# _prompt =
# [
# Dict(:name => "system", :text => systemmsg),
# Dict(:name => "user", :text => usermsg)
# ]
# # put in model format
# prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct")
# prompt *= """
# <|start_header_id|>assistant<|end_header_id|>
# """
# try
# response = a.func[:text2textInstructLLM](prompt)
# # sometime the model response like this "here's how I would respond: ..."
# if occursin("respond:", response)
# errornote = "You don't need to intro your response"
# error("generatechat() response contain : ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
# end
# response = GeneralUtils.remove_french_accents(response)
# response = replace(response, '*'=>"")
# response = replace(response, '$' => "USD")
# response = replace(response, '`' => "")
# response = GeneralUtils.remove_french_accents(response)
# responsedict = GeneralUtils.textToDict(response, ["Chat"],
# rightmarker=":", symbolkey=true, lowercasekey=true)
# for i ∈ [:chat]
# if length(JSON3.write(responsedict[i])) == 0
# error("$i is empty ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
# end
# end
# # check if there are more than 1 key per categories
# for i ∈ [:chat]
# matchkeys = GeneralUtils.findMatchingDictKey(responsedict, i)
# if length(matchkeys) > 1
# error("generatechat has more than one key per categories")
# end
# end
# # check if Context: is in chat
# if occursin("Context:", responsedict[:chat])
# error("Context: is in text. This is not allowed")
# end
# println("\n~~~ generatechat() ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
# pprintln(Dict(responsedict))
# # check whether an agent recommend wines before checking inventory or recommend wines
# # outside its inventory
# # ask LLM whether there are any winery mentioned in the response
# mentioned_winery = detectWineryName(a, responsedict[:chat])
# if mentioned_winery != "None"
# mentioned_winery = String.(strip.(split(mentioned_winery, ",")))
# # check whether the wine is in event
# isWineInEvent = false
# for winename in mentioned_winery
# for event in a.memory[:events]
# if event[:outcome] !== nothing && occursin(winename, event[:outcome])
# isWineInEvent = true
# break
# end
# end
# end
# # if wine is mentioned but not in timeline or shortmem,
# # then the agent is not supposed to recommend the wine
# if isWineInEvent == false
# errornote = "Previously: You recommend a wine that is not in your inventory which is not allowed."
# error("Previously: You recommend a wine that is not in your inventory which is not allowed.")
# end
# end
# result = responsedict[:chat]
# return result
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("\nAttempt $attempt. Error occurred: $errorMsg\n$st ", Dates.now(), " ", @__FILE__, " ", @__LINE__)
# end
# end
# error("generatechat failed to generate a response")
# end
""" Attempt to correct an incorrect JSON response from the LLM.
# Arguments