import random

import numpy as np

# State-transition probability matrix.
# State 4 (row 5) is terminal: it can only be entered from states 2 and 3,
# which is what makes those two states valuable for completing the task.
P = np.array([
    [0.5, 0.5, 0.0, 0.0, 0.0],
    [0.5, 0.0, 0.5, 0.0, 0.0],
    [0.0, 0.0, 0.0, 0.5, 0.5],
    [0.0, 0.1, 0.2, 0.2, 0.5],
    [0.0, 0.0, 0.0, 0.0, 0.0],
])

# Reward matrix; -100 marks transitions that can never occur.
R = np.array([
    [-1.0, 0.0, -100.0, -100.0, -100.0],
    [-1.0, -100.0, -2.0, -100.0, -100.0],
    [-100.0, -100.0, -100.0, -2.0, 0.0],
    [-100.0, 1.0, 1.0, 1.0, 10.0],
    [-100.0, -100.0, -100.0, -100.0, -100.0],
])


def get_chain(max_lens):
    """Sample one trajectory (chain) of at most *max_lens* transitions.

    Returns ``(states, rewards)``.  ``states`` has one more element than
    ``rewards`` because it includes the start state.  Sampling stops early
    when the terminal state 4 is reached.
    """
    states = []
    rewards = []
    # Pick a random non-terminal start state (0..3).
    s = random.choice(range(4))
    states.append(s)
    for _ in range(max_lens):
        # Sample the next state according to row s of the transition matrix.
        s_next = np.random.choice(np.arange(5), p=P[s])
        # Reward for taking the transition s -> s_next.
        r = R[s, s_next]
        # Advance to the next state and record it.
        s = s_next
        states.append(s)
        rewards.append(r)
        if s == 4:  # terminal state reached
            break
    return states, rewards


def get_chains(N, max_lens):
    """Sample *N* independent chains, each at most *max_lens* steps long."""
    states, rewards = [], []
    for _ in range(N):
        s, r = get_chain(max_lens)
        states.append(s)
        rewards.append(r)
    return states, rewards


def get_values(rewards, gamma=0.9):
    """Return the discounted return of one reward sequence.

    ``gamma`` is the discount factor (default 0.9, matching the original
    experiment); the reward at step ``i`` is weighted by ``gamma**i``, so
    later rewards contribute less.
    """
    V = 0.0
    for i, r in enumerate(rewards):
        V += gamma ** i * r
    return V


def get_values_by_monte_carlo(states, rewards):
    """Monte-Carlo estimate of the value of each of the 5 states.

    Each chain's discounted return is credited to the chain's *start*
    state; a state's value estimate is the mean of those returns.  States
    that never appear as a start state (e.g. terminal state 4, which is
    excluded from the start-state draw) get ``nan``.
    """
    # One bucket of returns per possible start state.
    values = [[] for _ in range(5)]
    for s, r in zip(states, rewards):
        values[s[0]].append(get_values(r))
    # Explicit nan for empty buckets avoids numpy's
    # "Mean of empty slice" RuntimeWarning while keeping the same result.
    return [float(np.mean(v)) if v else float("nan") for v in values]


if __name__ == "__main__":
    # Example run; one observed output was roughly:
    # [-2.27, -1.51, 2.09, 6.98, nan]
    # i.e. states 2 and 3 matter most for reaching the goal state 4.
    print(get_values_by_monte_carlo(*get_chains(1000, 20)))
计算出状态2,3对于完成目标意义重大
标签:02,rewards,get,0.0,马尔科夫,states,values,蒙特卡洛,100.0 From: https://www.cnblogs.com/demo-deng/p/16871225.html