import numpy as np

# State-transition probability matrix of the Markov reward process.
# Row i gives the probability of moving from state i to each of the 6 states;
# state 5 is absorbing (it transitions to itself with probability 1).
P = np.array([
    [0.9, 0.1, 0.0, 0.0, 0.0, 0.0],
    [0.5, 0.0, 0.5, 0.0, 0.0, 0.0],
    [0.0, 0.0, 0.0, 0.6, 0.0, 0.4],
    [0.0, 0.0, 0.0, 0.0, 0.3, 0.7],
    [0.0, 0.2, 0.3, 0.5, 0.0, 0.0],
    [0.0, 0.0, 0.0, 0.0, 0.0, 1.0],
])

# Reward received on reaching each state.
R = np.array([-1, -2, -2, 10, 1, 0])


def value_by_chain(chain, gamma=0.5):
    """Return the discounted return of a given state sequence.

    Args:
        chain: iterable of state indices (0-5) in visit order.
        gamma: per-step discount factor (default 0.5, matching the
            original example).

    Returns:
        float: sum of gamma**t * R[state_t] over the sequence.
    """
    total = 0.0
    for step, state in enumerate(chain):
        total += gamma ** step * R[state]
    return total


def get_bellman(gamma=0.5, tol=1e-10, max_iter=100000):
    """Solve the Bellman expectation equation V = R + gamma * P @ V by iteration.

    Performs in-place (Gauss-Seidel style) fixed-point sweeps — each state's
    value is updated immediately and reused within the same sweep.  Note: the
    original comment called this "gradient descent", which it is not.

    Args:
        gamma: discount factor (default 0.5, as in the original).
        tol: stop once no entry changes by more than this in a full sweep,
            instead of always burning through every iteration.
        max_iter: safety cap on sweeps (the original's fixed 100000).

    Returns:
        np.ndarray of shape (6,): the converged state-value estimate.
    """
    value = np.ones(6)
    for _ in range(max_iter):
        delta = 0.0  # largest single-state change in this sweep
        for i in range(6):
            new_v = R[i] + gamma * P[i].dot(value)
            delta = max(delta, abs(new_v - value[i]))
            value[i] = new_v
        if delta < tol:  # converged: further sweeps cannot move the estimate
            break
    return value


if __name__ == "__main__":
    print(get_bellman())
# Tags: numpy, matrix, Markov reward process, Bellman equation
# Source: https://www.cnblogs.com/demo-deng/p/16871493.html