import time

import numpy as np
import pandas as pd

N_STATES = 6                 # length of the 1-D world; the treasure 'T' sits at the right end
ACTIONS = ['left', 'right']
EPSILON = 0.9                # probability of acting greedily
ALPHA = 0.1                  # learning rate
GAMMA = 0.9                  # discount factor
MAX_EPISODES = 13            # number of training episodes
def build_q_table(n_states, actions):
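    """Create an all-zero Q-table with one row per state and one column per action."""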
    table = pd.DataFrame(np.zeros((n_states, len(actions))), columns=actions)
return table
def choose_action(state, q_table):
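    """Pick an action epsilon-greedily; act randomly while the state is still unexplored."""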
state_actions = q_table.loc[state, :]
    if (np.random.uniform() > EPSILON) or ((state_actions == 0).all()):
        action_name = np.random.choice(ACTIONS)
    else:
        action_name = state_actions.idxmax()  # idxmax returns the action label; argmax would return a position
return action_name
def get_env_feedback(S, A):
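    """Environment dynamics: return the next state and reward for taking action A in state S."""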
    if A == 'right':
        if S == N_STATES - 2:   # one step left of the treasure
            S_ = 'terminal'
            R = 1               # the only nonzero reward in this world
        else:
            S_ = S + 1
            R = 0
    else:
        R = 0
        if S == 0:              # already at the left wall; stay put
            S_ = S
        else:
            S_ = S - 1
    return S_, R
def update_env(S):
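    """Print the 1-D world, e.g. '--o--T', with 'o' marking the agent's position."""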
    env_list = ['-'] * (N_STATES - 1) + ['T']
    if S == 'terminal':
        # The agent stands on the goal cell; build the string from N_STATES
        # instead of hardcoding '-----o' so it stays correct if N_STATES changes.
        print('\r{}'.format('-' * (N_STATES - 1) + 'o'), end='')
        time.sleep(0.5)
    else:
        env_list[S] = 'o'
        print('\r{}'.format(''.join(env_list)), end='')
        time.sleep(0.1)
def rl():
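    """Train with tabular Q-learning and return the learned Q-table."""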
q_table = build_q_table(N_STATES, ACTIONS)
    for _ in range(MAX_EPISODES):
S = 0
is_terminated = False
update_env(S)
while not is_terminated:
A = choose_action(S, q_table)
S_, R = get_env_feedback(S, A)
            q_predict = q_table.loc[S, A]   # current estimate of Q(S, A)
            if S_ != 'terminal':
                # Bootstrapped target: reward plus discounted best value of the next state.
                q_target = R + GAMMA * q_table.iloc[S_, :].max()
            else:
                q_target = R                # terminal state has no future value
                is_terminated = True
            q_table.loc[S, A] += ALPHA * (q_target - q_predict)   # TD update
S = S_
update_env(S)
return q_table
if __name__ == "__main__":
q_table = rl()
print('\r\nQ-table:\n')
print(q_table)
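
# Expected behavior: over the training episodes the 'right' column of the Q-table
# grows, fastest near the treasure, so the learned greedy policy walks right.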