update
This commit is contained in:
@@ -102,53 +102,51 @@ function decisionMaker(a::T1, state::T2)::Dict{Symbol, Any} where {T1<:agent, T2
|
||||
"""
|
||||
You are a helpful sommelier working for a wine store.
|
||||
Your goal is to recommend the best wine from your inventory that matches the user's preferences.
|
||||
|
||||
$customerinfo
|
||||
|
||||
|
||||
You must follow the following criteria:
|
||||
1) Get to know what occasion the user is buying wine for
|
||||
2) Get to know what food the user will have with wine
|
||||
3) Get to know how much the user is willing to spend
|
||||
4) Get to know type of wine the user is looking for
|
||||
e.g. Red, White, Sparkling, Rose, Dessert, Fortified
|
||||
5) Get to know what wine characteristics the user is looking for
|
||||
e.g. tannin, sweetness, intensity, acidity
|
||||
4) Get to know type of wine the user is looking for e.g. Red, White, Sparkling, Rose, Dessert, Fortified
|
||||
5) Get to know what characteristics of wine the user is looking for
|
||||
e.g. tannin, sweetness, intensity, acidity
|
||||
6) Check your inventory for the best wine that matches the user's preferences
|
||||
|
||||
You should only respond with interleaving step-by-step Thought, Action, Observation steps.
|
||||
7) Recommend wine to the user
|
||||
|
||||
You should only respond with interleaving Thought, Action, Observation steps.
|
||||
Thought can reason about the current situation, and Action can be three types:
|
||||
1) winestock[query], which you can use to find wine in your inventory.
|
||||
1) winestock[query], which you can use to find wine in your inventory. The more input data the better.
|
||||
2) chatbox[text], which you can use to interact with the user.
|
||||
3) finish[answer], which returns your wine recommendation to the user.
|
||||
|
||||
3) recommendation[answer], which returns your wine recommendation to the user.
|
||||
|
||||
You should only respond in JSON format as described below:
|
||||
{
|
||||
"Thought_1": "reasoning 1",
|
||||
"Thought_2": "reasoning 2",
|
||||
...
|
||||
"Thought_n": "reasoning n",
|
||||
"Action_1": {"name": "action to take", "input": "Action input"},
|
||||
"Observation_1": "result of the action"
|
||||
"Thought": "your reasoning",
|
||||
"Action": {"name": "action to take", "input": "Action input"},
|
||||
"Observation": "result of the action"
|
||||
}
|
||||
|
||||
|
||||
Here are some examples:
|
||||
{
|
||||
"Question": "I'm looking for a sedan with an automatic driving feature.",
|
||||
"Thought_1": "I have many types of sedans in my inventory, each with diverse features.",
|
||||
"Thought_2": "But there is only 1 car that has the feature customer wanted.",
|
||||
"Action_1": {"name": "finish", "input": "I recommend a Tesla model Y. It has your requested feature and much more."}
|
||||
"Question": "I would like to buy a sedan with 8 seats.",
|
||||
"Thought_1": "Our showroom carries various vehicle models. But I'm not sure whether we have a model that fits the user demand, I need to check our inventory.",
|
||||
"Action_1": {"name": "inventory", "input": "sedan with 8 seats."},
|
||||
"Observation_1": "Several models have 8 seats. Available colors are black, red, and green."
|
||||
}
|
||||
{
|
||||
"Question": "I would like to buy a sedan with 8 seats.",
|
||||
"Thought_1": "I have one model that fits the user demand",
|
||||
"Thought_2": "But I'm not sure that we have it in stock.",
|
||||
"Thought_3": "I need to check our inventory first.",
|
||||
"Action_1": {"name": "inventory", "input": "Yiem model A"}
|
||||
"Thought_2": "I have to ask the user what color he likes.",
|
||||
"Action_2": {"name": "chatbox", "input": "Which color do you like?"}
|
||||
"Observation_2": "I'll take black."
|
||||
}
|
||||
|
||||
$reflect
|
||||
{
|
||||
"Thought_3": "There is only one model that fits the user preference. It's Yiem model A",
|
||||
"Action_3": {"name": "recommendation", "input": "I recommend a Yiem model A"}
|
||||
}
|
||||
|
||||
Let's begin!
|
||||
|
||||
$(JSON3.write(state[:thoughtHistory]))
|
||||
{Thought
|
||||
"""
|
||||
|
||||
prompt = formatLLMtext_llama3instruct("system", _prompt)
|
||||
@@ -168,7 +166,7 @@ function decisionMaker(a::T1, state::T2)::Dict{Symbol, Any} where {T1<:agent, T2
|
||||
:text=> prompt,
|
||||
)
|
||||
)
|
||||
|
||||
@show outgoingMsg
|
||||
_response = GeneralUtils.sendReceiveMqttMsg(outgoingMsg)
|
||||
thoughtJsonStr = _response[:response][:text]
|
||||
thoughtDict = copy(JSON3.read(thoughtJsonStr))
|
||||
|
||||
55
src/mcts.jl
55
src/mcts.jl
@@ -52,6 +52,7 @@ struct MCTSNode{T<:AbstractDict}
|
||||
state::T
|
||||
visits::Integer
|
||||
progressValue::Number
|
||||
reward::Number
|
||||
parent::Union{MCTSNode, Nothing}
|
||||
children::Dict{String, MCTSNode}
|
||||
end
|
||||
@@ -114,10 +115,13 @@ end
|
||||
julia>
|
||||
```
|
||||
|
||||
# TODO
|
||||
- [] update docstring
|
||||
|
||||
# Signature
|
||||
"""
|
||||
function expand(a::T1, node::MCTSNode, decisionMaker::Function,
|
||||
progressValueEstimator::Function; n::Integer=3) where {T1<:agent, T2<:AbstractDict}
|
||||
progressValueEstimator::Function; n::Integer=3) where {T1<:agent}
|
||||
|
||||
# sampling action from decisionMaker
|
||||
for sample in 1:n
|
||||
@@ -127,12 +131,13 @@ function expand(a::T1, node::MCTSNode, decisionMaker::Function,
|
||||
newNodeKey, newstate = MCTStransition(a, node.state, thoughtDict) #[] Implement your transition function
|
||||
|
||||
# add progressValueEstimator
|
||||
_, progressValue = progressValueEstimator(a, newstate)
|
||||
progressRationale, progressValue = progressValueEstimator(a, newstate)
|
||||
|
||||
#[WORKING] check for terminal state
|
||||
|
||||
|
||||
if newNodeKey ∉ keys(node.children)
|
||||
node.children[newNodeKey] = MCTSNode(newNodeKey, newstate, 0, progressValue,
|
||||
node.children[newNodeKey] = MCTSNode(newNodeKey, newstate, 0, progressValue, 0,
|
||||
node, Dict{String, MCTSNode}())
|
||||
end
|
||||
end
|
||||
@@ -152,31 +157,19 @@ julia>
|
||||
# TODO
|
||||
- [] update docstring
|
||||
- [WORKING] implement the function
|
||||
- [] reward only comes at terminal state
|
||||
- [] [] check for the terminal state (node.reward != 0), break if it is terminal state
|
||||
|
||||
# Signature
|
||||
"""
|
||||
function simulate(a, node::MCTSNode, max_depth::Int; n=3)
|
||||
function simulate(a, node::MCTSNode, decisionMaker, progressValueEstimator, max_depth::Int; n=3)
|
||||
|
||||
total_reward = 0.0
|
||||
for _ in 1:max_depth
|
||||
node = selectChildNode(node)
|
||||
expand(a, node, decisionMaker, progressValueEstimator, n=n)
|
||||
|
||||
# if isterminal (use for loop over node to look for childNode.reward != 0)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# #[] Implement your action selection function based on highest stateValue
|
||||
# action = select_action(state) # current state
|
||||
# state, reward = transition(state, action) # Implement transition function to a new state
|
||||
|
||||
# #[] check for the terminal state, break if it is terminal state
|
||||
# if isterminal
|
||||
|
||||
total_reward += reward
|
||||
end
|
||||
error("--> simulate")
|
||||
return total_reward
|
||||
@@ -254,11 +247,12 @@ julia> thoughtDict = Dict(
|
||||
"""
|
||||
function MCTStransition(a::T1, state::T2,
|
||||
thoughtDict::T3)::Tuple{String, Dict{Symbol, Any}} where {T1<:agent, T2<:AbstractDict, T3<:AbstractDict}
|
||||
latestThoughtKey, _ = GeneralUtils.findHighestIndexKey(thoughtDict, "Thought")
|
||||
latestActionKey, latestActionIndice = GeneralUtils.findHighestIndexKey(thoughtDict, "Action")
|
||||
_action = thoughtDict[latestActionKey]
|
||||
actionname = _action[:name]
|
||||
actioninput = _action[:input]
|
||||
println("")
|
||||
# latestThoughtKey, _ = GeneralUtils.findHighestIndexKey(thoughtDict, "Thought")
|
||||
# latestActionKey, latestActionIndice = GeneralUtils.findHighestIndexKey(thoughtDict, "Action")
|
||||
# _action = thoughtDict[:Action]
|
||||
actionname = thoughtDict[:Action][:name]
|
||||
actioninput = thoughtDict[:Action][:input]
|
||||
|
||||
# map action and input() to llm function
|
||||
response =
|
||||
@@ -272,11 +266,16 @@ function MCTStransition(a::T1, state::T2,
|
||||
|
||||
end
|
||||
|
||||
_, latestThoughtIndice = GeneralUtils.findHighestIndexKey(state[:thoughtHistory], "Thought")
|
||||
nextIndice = latestThoughtIndice === nothing ? 1 : latestThoughtIndice + 1
|
||||
latestThoughtKey = Symbol("Thought_$nextIndice")
|
||||
latestActionKey = Symbol("Action_$nextIndice")
|
||||
|
||||
# add Thought, action, observation to thoughtHistory
|
||||
newstate = deepcopy(state)
|
||||
newstate[:thoughtHistory][latestThoughtKey] = thoughtDict[latestThoughtKey]
|
||||
newstate[:thoughtHistory][latestActionKey] = thoughtDict[latestActionKey]
|
||||
latestObservationKey = Symbol("Observation_$(latestActionIndice)")
|
||||
newstate[:thoughtHistory][latestThoughtKey] = thoughtDict[:Thought]
|
||||
newstate[:thoughtHistory][latestActionKey] = thoughtDict[:Action]
|
||||
latestObservationKey = Symbol("Observation_$(nextIndice)")
|
||||
newstate[:thoughtHistory][latestObservationKey] = response
|
||||
|
||||
newNodeKey = GeneralUtils.uuid4snakecase()
|
||||
@@ -398,7 +397,7 @@ function runMCTS(
|
||||
maxIterations::Integer,
|
||||
w::Float64) where {T1<:agent}
|
||||
|
||||
root = MCTSNode("root", initialState, 0, 0.0, nothing, Dict{String, MCTSNode}())
|
||||
root = MCTSNode("root", initialState, 0, 0, 0, nothing, Dict{String, MCTSNode}())
|
||||
|
||||
for _ in 1:maxIterations
|
||||
node = root
|
||||
@@ -410,7 +409,7 @@ function runMCTS(
|
||||
|
||||
# from paper, just start simulation at this node. Not the node that newly expanded
|
||||
startsim_node = node
|
||||
reward = simulate(a, startsim_node, maxDepth, n=n)
|
||||
reward = simulate(a, startsim_node, decisionMaker, progressValueEstimator, maxDepth, n=n)
|
||||
backpropagate(leaf_node, reward)
|
||||
end
|
||||
|
||||
|
||||
Reference in New Issue
Block a user