This commit is contained in:
narawat lamaiin
2024-08-03 18:58:16 +07:00
parent 51f91e8492
commit c4864a22ed
3 changed files with 177 additions and 19 deletions

View File

@@ -394,8 +394,7 @@ function extractWineAttributes_1(a::T1, input::T2
end
#[TESTING] remove tasting_notes because the database wasn't prepared to be searched using it
responsedict[:tasting_notes] = "NA"
responsedict[:flavor] = "NA"
responsedict[:tasting_notes] = responsedict[:flavor]
result = ""
for (k, v) in responsedict
@@ -658,6 +657,138 @@ function jsoncorrection(config::T1, input::T2, correctJsonExample::T3;
end
end
# [WORKING] check whether
# function isrecommend(state::T1, text2textInstructLLM::Function
# ) where {T1<:AbstractDict}
# systemmsg =
# """
# You are a helpful assistant that analyzes agent's trajectories to find solutions and observations (i.e., the results of actions) to answer the user's questions.
# Definitions:
# "question" is the user's question.
# "thought" is step-by-step reasoning about the current situation.
# "plan" is what to do to complete the task from the current situation.
# "action_name" is the name of the action taken, which can be one of the following functions:
# 1) CHATBOX[text], which you can use to talk with the user. "text" is in verbal English.
# 2) WINESTOCK[query], which you can use to find info about wine in your inventory. "query" is a search term in verbal English. The best query must include "budget", "type of wine", "characteristics of wine" and "food pairing".
# "action_input" is the input to the action
# "observation" is the result of the immediately preceding action.
# At each round of conversation, the user will give you:
# Context: ...
# Trajectories: ...
# You should then respond to the user with:
# 1) trajectory_evaluation:
# - Analyze the trajectories of a solution to answer the user's original question.
# Then given a question and a trajectory, evaluate its correctness and provide your reasoning and
# analysis in detail. Focus on the latest thought, action, and observation.
# Incomplete trajectories can be correct if the thoughts and actions so far are correct,
# even if the answer is not found yet. Do not generate additional thoughts or actions.
# 2) answer_evaluation: Focus only on the matter mentioned in the question and analyze how the latest observation addresses the question.
# 3) accepted_as_answer: Decide whether the latest observation's content answers the question. The possible responses are either 'Yes' or 'No.'
# Bad example (The observation didn't answer the question):
# question: Find cars with 4 wheels.
# observation: There are 2 cars in the table.
# Good example (The observation answers the question):
# question: Find cars with a stereo.
# observation: There are 1 cars in the table. 1) brand: Toyota, model: yaris, color: black.
# 4) score: Correctness score s where s is a single integer between 0 to 9.
# - 0 means the trajectories are incorrect.
# - 9 means the trajectories are correct, and the observation's content directly answers the question.
# 5) suggestion: if accepted_as_answer is "No", provide suggestion.
# You should only respond in format as described below:
# trajectory_evaluation: ...
# answer_evaluation: ...
# accepted_as_answer: ...
# score: ...
# suggestion: ...
# Let's begin!
# """
# thoughthistory = ""
# for (k, v) in state[:thoughtHistory]
# thoughthistory *= "$k: $v\n"
# end
# usermsg =
# """
# Context: None
# Trajectories: $thoughthistory
# """
# _prompt =
# [
# Dict(:name=> "system", :text=> systemmsg),
# Dict(:name=> "user", :text=> usermsg)
# ]
# # put in model format
# prompt = GeneralUtils.formatLLMtext(_prompt, "llama3instruct")
# prompt *=
# """
# <|start_header_id|>assistant<|end_header_id|>
# """
# for attempt in 1:5
# try
# response = text2textInstructLLM(prompt)
# responsedict = GeneralUtils.textToDict(response,
# ["trajectory_evaluation", "answer_evaluation", "accepted_as_answer", "score", "suggestion"],
# rightmarker=":", symbolkey=true)
# # check if dict has all required value
# trajectoryevaluation_text::AbstractString = responsedict[:trajectory_evaluation]
# answerevaluation_text::AbstractString = responsedict[:answer_evaluation]
# responsedict[:score] = parse(Int, responsedict[:score]) # convert string "5" into integer 5
# score::Integer = responsedict[:score]
# accepted_as_answer::AbstractString = responsedict[:accepted_as_answer]
# suggestion::AbstractString = responsedict[:suggestion]
# # add to state here instead to in transition() because the latter causes julia extension crash (a bug in julia extension)
# state[:evaluation] = "$(responsedict[:trajectory_evaluation]) $(responsedict[:answer_evaluation])"
# state[:evaluationscore] = responsedict[:score]
# state[:accepted_as_answer] = responsedict[:accepted_as_answer]
# state[:suggestion] = responsedict[:suggestion]
# # mark as terminal state when the answer is achieved
# if accepted_as_answer == "Yes"
# state[:isterminal] = true
# state[:reward] = 1
# end
# println("--> 5 Evaluator ", @__FILE__, " ", @__LINE__)
# pprintln(Dict(responsedict))
# return responsedict[:score]
# catch e
# io = IOBuffer()
# showerror(io, e)
# errorMsg = String(take!(io))
# st = sprint((io, v) -> show(io, "text/plain", v), stacktrace(catch_backtrace()))
# println("")
# println("Attempt $attempt. Error occurred: $errorMsg\n$st")
# println("")
# end
# end
# error("evaluator failed to generate an evaluation")
# end