Environment
The environment is a 4×4 grid. The agent starts at (0, 0) and the target is at (3, 3); reaching (3, 3) gives a reward of 200. The action space is ['up', 'down', 'left', 'right'].
Code
import torch

class maze_env:
    def __init__(self, row=4, column=4):
        self.done = False
        self.row = row
        self.column = column
        self.maze = torch.zeros(self.row, self.column)
        self.target_x = row - 1            # goal cell, (3, 3) for the default 4x4 grid
        self.target_y = column - 1
        self.x = 0                         # agent starts at (0, 0)
        self.y = 0
        self.maze[self.x][self.y] = 1      # mark the agent's position with a 1

    def show_maze(self):
        print(self.maze)

    def step(self, action):
        r = 0
        self.maze[self.x][self.y] = 0
        # note: as written, 'up'/'right' move along the column index y,
        # while 'left'/'down' move along the row index x
        if action == 'up' and self.y >= 1:
            self.y -= 1
        if action == 'right' and self.y <= self.column - 2:
            self.y += 1
        if action == 'left' and self.x >= 1:
            self.x -= 1
        if action == 'down' and self.x <= self.row - 2:
            self.x += 1
        self.maze[self.x][self.y] = 1
        if self.x == self.target_x and self.y == self.target_y:
            self.done = True
            r = 200
        return (self.x, self.y), r, self.done

    def reset(self):
        self.done = False
        self.maze = torch.zeros(self.row, self.column)
        self.x = 0
        self.y = 0
        self.maze[self.x][self.y] = 1
        return (0, 0)
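A quick manual check of the environment (a minimal sketch; it only uses the maze_env class above):

env = maze_env()
env.show_maze()              # the 1 marks the agent at (0, 0)
print(env.step('right'))     # -> ((0, 1), 0, False): 'right' increases the column index y
print(env.step('down'))      # -> ((1, 1), 0, False): 'down' increases the row index x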
Agent
The agent learns the state-action value function with tabular Q-learning.
Code
import numpy as np
import pandas as pd

class Qlearning:
    def __init__(self, actions, learning_rate=0.1, reward_decay=0.9, e_greedy=0.9):
        self.actions = actions                     # action names, used as the Q-table columns
        self.lr = learning_rate                    # learning rate
        self.gamma = reward_decay                  # discount factor
        self.epsilon = e_greedy                    # probability of acting greedily
        self.q_table = pd.DataFrame(columns=self.actions, dtype=np.float64)

    def choose_action(self, observation):
        self.check_state_exist(observation)
        if np.random.uniform() < self.epsilon:
            # greedy: pick one of the highest-valued actions, breaking ties at random
            state_action = self.q_table.loc[observation, :]
            action = np.random.choice(state_action[state_action == np.max(state_action)].index)
        else:
            # explore: pick a random action
            action = np.random.choice(self.actions)
        return action

    def check_state_exist(self, state):
        # register a new state with an all-zero row
        # (assigned via .loc, since DataFrame.append is gone in newer pandas)
        if state not in self.q_table.index:
            self.q_table.loc[state] = [0.0] * len(self.actions)

    def learn(self, s, a, r, s_, done):
        self.check_state_exist(s_)
        q_predict = self.q_table.loc[s, a]
        if not done:
            q_target = r + self.gamma * self.q_table.loc[s_, :].max()
        else:
            q_target = r                           # terminal state: no bootstrapping
        self.q_table.loc[s, a] += self.lr * (q_target - q_predict)
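The update in learn() is the standard tabular Q-learning rule: Q(s, a) <- Q(s, a) + lr * (r + gamma * max_a' Q(s', a') - Q(s, a)), with the bootstrap term dropped at a terminal state. One hand-run of the update on concrete numbers (a minimal sketch; the transition is chosen for illustration):

agent = Qlearning(['up', 'down', 'left', 'right'])
agent.check_state_exist("(3, 2)")                 # normally done inside choose_action
# terminal transition: taking 'right' in (3, 2) reaches the goal with reward 200
agent.learn("(3, 2)", 'right', 200, "(3, 3)", True)
print(agent.q_table.loc["(3, 2)", 'right'])       # 0 + 0.1 * (200 - 0) = 20.0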
Main program
maze = maze_env()
agent = Qlearning(['up', 'down', 'left', 'right'])
for i in range(500):                                       # 500 training episodes
    observation = maze.reset()
    action_cnt = 0
    while True:
        action = agent.choose_action(str(observation))     # states are keyed by their string form
        observation_, r, done = maze.step(action)
        agent.learn(str(observation), action, r, str(observation_), done)
        observation = observation_
        action_cnt = action_cnt + 1
        if done:
            print(agent.q_table)
            print("total moves:", action_cnt)
            break
Final Q-table
Looking at the learned values, at (1, 0) the agent moves 'left' 90% of the time, and at (0, 1) it moves 'down' 90% of the time.
Because it chooses the greedy action with 90% probability, it only converged on a single optimal path: (0,0) -> (0,1) -> (1,1) -> (1,2) -> (2,2) -> (3,2) -> (3,3).
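That path can be read directly off the trained Q-table with a greedy rollout that always takes the highest-valued action (a minimal sketch; it reuses the maze and agent from the main program and assumes every state along the path already has a row in the Q-table):

observation = maze.reset()
path = [observation]
for _ in range(20):                                        # cap the rollout in case the greedy policy cycles
    action = agent.q_table.loc[str(observation)].idxmax()  # greedy action for the current state
    observation, r, done = maze.step(action)
    path.append(observation)
    if done:
        break
print(path)   # e.g. [(0, 0), (0, 1), (1, 1), (1, 2), (2, 2), (3, 2), (3, 3)]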