This post directly merges first visit and every visit into one implementation: when every visit is chosen, the policy update uses a stochastic epsilon greedy policy; when first visit is chosen, the policy update uses a greedy policy.

Theoretical background: (algorithm figure omitted)

A few notes:

1. I found that the agent usually prefers to stay where it is, because the reward for reaching the terminal is too small, while walking into a forbidden cell or the boundary gives a large negative reward, so staying put looks better in the long run. I therefore added r_stay, a small penalty given whenever the model decides to stay in place; it only needs to be added in env.py (see the sketch after this list). Also note that r_boundary and r_forbidden should not be set too negative when testing.

2. The step method in env.py needs to be adjusted so that the agent is allowed to enter forbidden cells:

if not (0 <= ni < self.size and 0 <= nj < self.size):
    next_state = self.state_id(i, j)
else:
    next_state = self.state_id(ni, nj)
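The full env.py is not shown in this post, so the following is only a minimal sketch of where r_stay and the relaxed boundary check fit in. The class name GridWorldEnvSketch, the action table, the state_id layout, and the reward_list ordering are illustrative assumptions, not the author's actual implementation.

class GridWorldEnvSketch:
    def __init__(self, size, forbidden, terminal, r_boundary, r_other,
                 r_terminal, r_forbidden, r_stay):
        self.size = size
        self.num_states = size * size
        self.actions = [(-1, 0), (1, 0), (0, -1), (0, 1), (0, 0)]  # up, down, left, right, stay
        self.num_actions = len(self.actions)
        self.terminal = [self.state_id(i, j) for i, j in terminal]    # assumed: stored as state ids
        self.forbidden = [self.state_id(i, j) for i, j in forbidden]
        self.r_boundary, self.r_other, self.r_terminal = r_boundary, r_other, r_terminal
        self.r_forbidden, self.r_stay = r_forbidden, r_stay
        # reward_list / reward_space_size are stored by the MC class below but never used;
        # the ordering here is a guess
        self.reward_list = [r_boundary, r_other, r_terminal, r_forbidden, r_stay]
        self.reward_space_size = len(self.reward_list)

    def state_id(self, i, j):
        return i * self.size + j

    def step(self, state, action):
        i, j = divmod(state, self.size)
        di, dj = self.actions[action]
        ni, nj = i + di, j + dj
        if not (0 <= ni < self.size and 0 <= nj < self.size):
            next_state = self.state_id(i, j)    # bumping the boundary keeps the agent in place
            reward = self.r_boundary
        else:
            next_state = self.state_id(ni, nj)  # allowed to walk into forbidden cells
            if next_state in self.forbidden:
                reward = self.r_forbidden
            elif next_state in self.terminal:
                reward = self.r_terminal
            elif (di, dj) == (0, 0):
                reward = self.r_stay            # the extra penalty for choosing to stay put
            else:
                reward = self.r_other
        done = next_state in self.terminal
        return next_state, reward, done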
The full Monte Carlo policy iteration code:

from collections import defaultdict

import numpy as np

from env import GridWorldEnv
from utils import drow_policy


class MonteCarloPolicyIteration(object):
    def __init__(self, env: GridWorldEnv, gamma=0.9, samples=1, mode='first visit'):
        self.env = env
        self.action_space_size = self.env.num_actions  # up, down, left, right, stay
        self.reward_space_size = self.env.reward_space_size  # reward of executing each action
        self.state_space_size = self.env.num_states
        self.reward_list = self.env.reward_list
        self.gamma = gamma
        self.samples = samples
        self.mode = mode
        self.policy = np.ones((self.state_space_size, self.action_space_size)) / self.action_space_size
        self.state_value = np.zeros((self.env.size, self.env.size))
        self.qvalues = np.zeros((self.state_space_size, self.action_space_size))
        self.returns = np.zeros((self.state_space_size, self.action_space_size))  # must be initialized to zeros, since returns are accumulated
        self.nums = np.zeros((self.state_space_size, self.action_space_size))

    def solve(self, iterations=20, epsilon=0.1):
        """
        :param iterations: number of iterations
        :param epsilon: epsilon greedy, in [0, 1]. epsilon=0: greedy, always pick the best action;
                        epsilon=1: stochastic, every action has the same probability
        """
        for i in range(iterations):
            for _ in range(self.samples):
                # randomly pick a non-terminal start state so that all states are visited often enough
                non_terminal_states = [i for i in range(self.state_space_size) if i not in self.env.terminal]
                s = np.random.choice(non_terminal_states)
                a = np.random.choice(self.action_space_size, p=self.policy[s])  # sample an action from the current policy
                episode = self.generate_episodes(s, a)
                self.update_q_from_episode(episode)
            for s in range(self.state_space_size):
                if s in self.env.terminal:
                    self.policy[s] = np.eye(self.action_space_size)[4]
                else:
                    best_a = np.argmax(self.qvalues[s])
                    if self.mode == 'every visit':
                        # with first visit, many (s, a) pairs may be visited many times
                        # but only contribute a single action-value estimate
                        # epsilon greedy
                        self.policy[s] = epsilon / self.action_space_size  # give every action a small probability
                        self.policy[s, best_a] = 1 - epsilon + epsilon / self.action_space_size  # give the best action a large probability (row sums to 1)
                    elif self.mode == 'first visit':  # effectively the epsilon=0 case
                        self.policy[s] = np.eye(self.action_space_size)[best_a]
        self.state_value = np.sum(self.policy * self.qvalues, axis=1).reshape(self.env.size, self.env.size)

    def generate_episodes(self, start_state, start_action, max_steps=200):
        """
        :param start_state: state_id of the start state
        :param start_action: the start action
        :return: [(state_id, action, reward), (...)]
        """
        episode = []
        state = start_state
        action = start_action
        for _ in range(max_steps):
            next_state, reward, done = self.env.step(state, action)
            episode.append((state, action, reward))
            if done:
                break
            state = next_state
            # pick one of [0, action_space_size); the probability of each action is policy[state]
            action = np.random.choice(self.action_space_size, p=self.policy[state])
        return episode

    def update_q_from_episode(self, episode):
        G = 0
        visit = set()
        # iterate backwards; reversed() only reverses the order of the (s, a, r) tuples, not their contents
        for s, a, r in reversed(episode):
            G = r + self.gamma * G
            if self.mode == 'first visit':
                if (s, a) not in visit:
                    visit.add((s, a))
                    self.returns[s, a] += G
                    self.nums[s, a] += 1
                    self.qvalues[s, a] = self.returns[s, a] / self.nums[s, a]
            elif self.mode == 'every visit':
                self.returns[s, a] += G
                self.nums[s, a] += 1
                self.qvalues[s, a] = self.returns[s, a] / self.nums[s, a]
            else:
                raise Exception("Invalid mode")


if __name__ == "__main__":
    env = GridWorldEnv(
        size=5,
        forbidden=[(1, 2), (3, 3)],
        terminal=[(4, 4)],
        r_boundary=-1,
        r_other=-0.04,
        r_terminal=1,
        r_forbidden=-1,
        r_stay=-0.1
    )
    vi = MonteCarloPolicyIteration(env=env, gamma=0.9, samples=10, mode='every visit')
    vi.solve(iterations=10000, epsilon=0.3)  # epsilon is only needed when mode='every visit'
    print("\n state value: ")
    print(vi.state_value)
    drow_policy(vi.policy, env)

With the same configuration, the policies obtained at iterations = 100, 1000 and 10000 (policy figures omitted) show that the larger the number of iterations, the better the policy. Because the policy is stochastic, running the same configuration several times will very likely give different results; most of the time the agent is doing some exploration, so the policy does not always look optimal. In other words, epsilon greedy sacrifices optimality in exchange for more exploration: the smaller epsilon is, the closer the result is to the optimal greedy policy; the larger epsilon is, the longer the run takes.
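As a quick illustration of this trade-off, the standalone snippet below (not part of the class) prints the action distribution produced by the same epsilon greedy update rule for a few values of epsilon; the greedy action index 2 is just an example.

import numpy as np

num_actions = 5
best_a = 2  # suppose np.argmax(qvalues[s]) == 2

for epsilon in (0.0, 0.1, 0.3, 1.0):
    policy_s = np.full(num_actions, epsilon / num_actions)          # every action gets epsilon/|A|
    policy_s[best_a] = 1 - epsilon + epsilon / num_actions          # greedy action gets the rest
    print(f"epsilon={epsilon}: {np.round(policy_s, 3)}, sum={policy_s.sum():.1f}")

# epsilon=0.0 -> [0. 0. 1. 0. 0.]           pure greedy, no exploration
# epsilon=1.0 -> [0.2 0.2 0.2 0.2 0.2]      uniform, maximum exploration

With epsilon = 0.3, as in the __main__ block above, the greedy action only gets probability 0.76, which is why the learned policy often looks noisier than the purely greedy one.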