This commit is contained in:
2025-03-13 19:10:02 +07:00
parent 2036a07f46
commit a22f9c52d2
3 changed files with 69 additions and 59 deletions

View File

@@ -217,7 +217,7 @@ function decisionMaker(state::T1, context, text2textInstructLLM::Function,
# put in model format # put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen") prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
response = text2textInstructLLM(prompt) response = text2textInstructLLM(prompt) #BUG response === nothing
# LLM tends to generate observation given that it is in the input # LLM tends to generate observation given that it is in the input
response = response =
@@ -812,7 +812,7 @@ function evaluator(state::T1, text2textInstructLLM::Function;
question: Find cars with a stereo. question: Find cars with a stereo.
observation: There are 1 cars in the table. 1) brand: Toyota, model: yaris, color: black. observation: There are 1 cars in the table. 1) brand: Toyota, model: yaris, color: black.
4) Score: Correctness score s where s is a single integer between 0 to 9. 4) Score: Correctness score s where s is a single integer between 0 to 9.
Score guideline: For example:
- 0 indicates that both the trajectory is incorrect, failed, or contains errors, and the observation is incorrect or failed - 0 indicates that both the trajectory is incorrect, failed, or contains errors, and the observation is incorrect or failed
- 4 indicates that the trajectory is correct but the observation is incorrect or failed - 4 indicates that the trajectory is correct but the observation is incorrect or failed
- 8 indicates that both the trajectory is correct, and the observation's content directly answers the question. - 8 indicates that both the trajectory is correct, and the observation's content directly answers the question.
@@ -1034,7 +1034,7 @@ function reflector(config::T1, state::T2)::String where {T1<:AbstractDict, T2<:A
) )
) )
for attempt in 1:5 for attempt in 1:10
try try
response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg) response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
_responseJsonStr = response[:response][:text] _responseJsonStr = response[:response][:text]
@@ -1280,7 +1280,8 @@ function query(query::T, executeSQL::Function, text2textInstructLLM::Function;
_, _, resultState = LLMMCTS.runMCTS(initialstate, transition, transitionargs; _, _, resultState = LLMMCTS.runMCTS(initialstate, transition, transitionargs;
horizontalSampleExpansionPhase=3, horizontalSampleExpansionPhase=3,
horizontalSampleSimulationPhase=2, horizontalSampleSimulationPhase=2,
maxSimulationDepth=5, maxiterations=2, maxSimulationDepth=10,
maxiterations=1,
explorationweight=1.0, explorationweight=1.0,
earlystop=earlystop, earlystop=earlystop,
saveSimulatedNode=true) saveSimulatedNode=true)

View File

@@ -403,10 +403,8 @@ function getdata_decisionMaker(state::Dict, context::Dict, text2textInstructLLM:
- Text information in the database is sometimes stored in lower case. If your search returns empty, try using lower case to search. - Text information in the database is sometimes stored in lower case. If your search returns empty, try using lower case to search.
You should then respond to the user with: You should then respond to the user with:
1) Understanding: 1) Comprehension:
- State your understanding about the current situation. - State your comprehension about the current situation.
2) Reasoning:
- State your step by step reasoning about the current situation.
3) Plan: Step-by-step instructions of how to complete the task. 3) Plan: Step-by-step instructions of how to complete the task.
- Focus on improving the code from the last round. - Focus on improving the code from the last round.
- Do not create any table in the database. - Do not create any table in the database.
@@ -415,8 +413,7 @@ function getdata_decisionMaker(state::Dict, context::Dict, text2textInstructLLM:
- Do not wrap the code and no comment as it will be executed directly without any modification against the database. - Do not wrap the code and no comment as it will be executed directly without any modification against the database.
You should only respond in format as described below and nothing more: You should only respond in format as described below and nothing more:
Understanding: ... Comprehension: ...
Reasoning: ...
Plan: Plan:
1) ... 1) ...
2) ... 2) ...
@@ -446,15 +443,15 @@ function getdata_decisionMaker(state::Dict, context::Dict, text2textInstructLLM:
] ]
# put in model format # put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct") prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
prompt *= """
<|start_header_id|>assistant<|end_header_id|>
"""
try try
response = text2textInstructLLM(prompt) response = text2textInstructLLM(prompt)
responsedict = GeneralUtils.textToDict(response,
["Understanding", "Reasoning", "Plan", "Code"]; header = ["Comprehension:", "Plan:", "Code:"]
rightmarker=":", symbolkey=true, lowercasekey=true) dictkey = ["comprehension", "plan", "code"]
responsedict = GeneralUtils.textToDict(response, header;
dictKey=dictkey, symbolkey=true)
_code = responsedict[:code] _code = responsedict[:code]
code = strip(_code) code = strip(_code)
@@ -480,7 +477,7 @@ function getdata_decisionMaker(state::Dict, context::Dict, text2textInstructLLM:
println("\n~~~ getdata_decisionMaker() ", @__FILE__, " ", @__LINE__) println("\n~~~ getdata_decisionMaker() ", @__FILE__, " ", @__LINE__)
pprintln(Dict(responsedict)) pprintln(Dict(responsedict))
return (thought=responsedict[:reasoning], code=code, success=true, errormsg=nothing) return (thought=responsedict[:comprehension], code=code, success=true, errormsg=nothing)
catch e catch e
io = IOBuffer() io = IOBuffer()
showerror(io, e) showerror(io, e)
@@ -631,29 +628,30 @@ function extractContent_dataframe(df::DataFrame, text2textInstructLLM::Function
dfstr = GeneralUtils.dfToString(df1) dfstr = GeneralUtils.dfToString(df1)
systemmsg = """ systemmsg =
You are an assistant that reads out the resulting table after the user executes an SQL command. """
You are an assistant that reads out the resulting table after the user executes an SQL command.
At each round of conversation, the user will give you: At each round of conversation, the user will give you:
- User intention: ... - User intention: ...
- Resulting table dimension: ... - Resulting table dimension: ...
- Resulting table: The resulting table after executing the user's intention. - Resulting table: The resulting table after executing the user's intention.
You should then respond to the user with: You should then respond to the user with:
- About_resulting_table: - About_resulting_table:
1) What does the resulting table represent? 1) What does the resulting table represent?
- Search_summary: - Search_summary:
1) Summarize the table's content based on the user intention in verbal English. 1) Summarize the table's content based on the user intention in verbal English.
Here are some examples: Here are some examples:
Bad example (this does not summarize the table content): there are 2 columns in the table i.e. "cash" and "number". Bad example (this does not summarize the table content): there are 2 columns in the table i.e. "cash" and "number".
2) Do not generate additional text. 2) Do not generate additional text.
You should only respond in format as described below: You should only respond in format as described below:
About_resulting_table: ... About_resulting_table: ...
Search_summary: ... Search_summary: ...
Let's begin! Let's begin!
""" """
usermsg = """ usermsg = """
Resulting table: $dfstr Resulting table: $dfstr
""" """
@@ -664,20 +662,33 @@ function extractContent_dataframe(df::DataFrame, text2textInstructLLM::Function
] ]
# put in model format # put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct") prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
prompt *= """ header = ["About_resulting_table:", "Search_summary:"]
<|start_header_id|>assistant<|end_header_id|> dictkey = ["about_resulting_table", "search_summary"]
"""
for i in 1:5 for i in 1:5
response = text2textInstructLLM(prompt) response = text2textInstructLLM(prompt)
responsedict = GeneralUtils.textToDict(response, ["About_resulting_table", "Search_summary"],
rightmarker=":", symbolkey=true) kw = []
# use for loop and detect_keyword function to get the exact variation of each keyword in the text then push to kw list
for keyword in header
detected = GeneralUtils.detect_keyword(keyword, response)
push!(kw, detected)
end
if nothing in kw
println("Some keywords are missing, Required keywords=$header, Response keywords=$kw ", @__FILE__, ":", @__LINE__, " $(Dates.now())")
continue # try again next loop
end
responsedict = GeneralUtils.textToDict(response, header;
dictKey=dictkey, symbolkey=true)
# result = dfstr # result = dfstr
result = """ result =
Summary: $(responsedict[:Search_summary]) """
More details: $dfstr Summary: $(responsedict[:search_summary])
""" More details: $dfstr
"""
if row > 2 if row > 2
result *= "There are many more rows, but they are truncated because there are too many of them." result *= "There are many more rows, but they are truncated because there are too many of them."
@@ -766,11 +777,11 @@ function getTableNameFromSQL(sql::T, text2textInstructLLM::Function)::Vector{Str
Query: ... Query: ...
You should then respond to the user with: You should then respond to the user with:
- table_name: a list of table name that the user mentioned in the query. - Table_name: a list of table name that the user mentioned in the query.
For example, ["color", "type"] For example, ["color", "type"]
You must only respond in format as described below: You must only respond in format as described below:
table_name: ["...", "...", ...] Table_name: ["...", "...", ...]
Let's begin! Let's begin!
""" """
@@ -786,17 +797,15 @@ function getTableNameFromSQL(sql::T, text2textInstructLLM::Function)::Vector{Str
] ]
# put in model format # put in model format
prompt = GeneralUtils.formatLLMtext(_prompt; formatname="llama3instruct") prompt = GeneralUtils.formatLLMtext(_prompt; formatname="qwen")
prompt *= """ header = ["Table_name:"]
<|start_header_id|>assistant<|end_header_id|> dictkey = ["table_name"]
"""
for attempt in 1:5 for attempt in 1:5
try try
response = text2textInstructLLM(prompt) response = text2textInstructLLM(prompt)
responsedict = GeneralUtils.textToDict(response, responsedict = GeneralUtils.textToDict(response, header;
["table_name"], dictKey=dictkey, symbolkey=true)
rightmarker=":", symbolkey=true)
response = copy(JSON3.read(responsedict[:table_name])) response = copy(JSON3.read(responsedict[:table_name]))
return response return response

View File

@@ -51,7 +51,7 @@ function text2textInstructLLM(prompt::String)
) )
) )
_response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120, maxattempt=3) _response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg; timeout=120, maxattempt=2)
response = _response[:response][:text] response = _response[:response][:text]
return response return response
@@ -147,10 +147,10 @@ sessionId = "555"
# query = Dict(:text=> "How many wines from France do you have that can be paired with lamb?") # query = Dict(:text=> "How many wines from France do you have that can be paired with lamb?")
# query = "How many wines are from United States?" # query = "How many wines are from United States?"
# query = "retailer: Yiem, wine_type: red, sweetness: 1-2, intensity: 4-5, wine price: 20-40" query = "retailer: Yiem, wine_type: red, sweetness: 1-2, intensity: 4-5, wine price: 20-40"
# query = "wine_type: white, country: United States, sweetness: 1-2, tannin: 3, food to be served with wine: pizza" # query = "wine_type: white, country: United States, sweetness: 1-2, tannin: 3, food to be served with wine: pizza"
# query = "wine_type: white, country: Austria, food to be served with wine: pork" # query = "wine_type: white, country: Austria, food to be served with wine: pork"
query = "wine price: less than 25, wine_type: rose, country: France, sweetness: 2, tannin: 3, food to be served with wine: pizza" # query = "wine price: less than 25, wine_type: rose, country: France, sweetness: 2, tannin: 3, food to be served with wine: pizza"
# query = Dict(:text=> "wine_type: white, country: France, sweetness: 1") # query = Dict(:text=> "wine_type: white, country: France, sweetness: 1")
result = SQLLLM.query(query, executeSQL, text2textInstructLLM; result = SQLLLM.query(query, executeSQL, text2textInstructLLM;
insertSQLVectorDB=insertSQLVectorDB, insertSQLVectorDB=insertSQLVectorDB,