I found code online that implements reinforcement learning for a multi-armed bandit (slot machine) using a greedy algorithm, and I replaced the greedy strategy with a softmax strategy. I have some questions about the probability with which each arm gets selected. The code is as follows:
import numpy as np
import random

# Definition of the slot machine (multi-armed bandit)
class Bandit:
    def __init__(self, arms_prob):
        self.arms_prob = arms_prob        # win probability of each arm
        self.size = len(self.arms_prob)   # number of arms

    def play(self, i):
        # Pull arm i; return 1 on a win, 0 on a loss, -1 for an invalid arm index
        if not 0 <= i < self.size:
            return -1
        elif random.uniform(0, 1) < self.arms_prob[i]:
            return 1
        else:
            return 0


class Model:
    def __init__(self, bandit, temperature=0.2, training_epochs=10000):
        self.bandit = bandit                      # the bandit being played
        self.temperature = temperature            # temperature parameter
        self.training_epochs = training_epochs    # number of plays
        self.values = np.zeros(bandit.size)       # average reward of each arm
        self.times = np.zeros(bandit.size)        # number of times each arm was pulled
        self.p = [1 / bandit.size for _ in range(bandit.size)]  # selection distribution over the arms
        self.size = bandit.size                   # number of arms
        self.result = 0                           # index of the arm with the highest estimated reward

    # Softmax probability of selecting the given arm
    def probability(self, index):
        denom = 0.0
        for i in range(self.size):
            denom += np.e ** (self.values[i] / self.temperature)
        return (np.e ** (self.values[index] / self.temperature)) / denom

    def train(self):
        r = 0  # cumulative reward
        for i in range(self.training_epochs):
            # Sample an arm according to the current weights
            index = random.choices(range(self.size), weights=self.p)[0]
            reward = self.bandit.play(index)
            assert reward >= 0 and index >= 0
            # Incrementally update the pulled arm's average reward, then its pull count
            self.values[index] = (self.values[index] * self.times[index] + reward) / (self.times[index] + 1)
            self.times[index] += 1
            # Option 1: recompute the selection probability of every arm
            for j in range(self.size):
                self.p[j] = self.probability(j)
            # Option 2: recompute only the pulled arm's probability
            # self.p[index] = self.probability(index)
            print("Round %d, choose %d, reward %d " % (i + 1, index, reward))
            print(repr(self.values))
            r += reward  # accumulate reward
            # print(repr(self.p))
            sum_ = 0
            for z in range(self.size):
                sum_ += self.p[z]
            print(sum_)
        self.result = self.values.argmax()
        print('Cumulative reward:', r, ' return rate:', r / self.training_epochs)


bandit = Bandit([0.5, 0.6, 0.8, 0.9, 0.3, 0.95, 0.96, 0.45, 0.93, 0.22, 0.65])
model = Model(bandit, temperature=0.1, training_epochs=30000)
# bandit = Bandit([0.4, 0.2])
# model = Model(bandit, temperature=0.01, training_epochs=3000)
model.train()
print("The best choice is arm %d." % model.result)
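As a side note on the softmax computation itself, here is a minimal sketch (my own addition, not from the code I found online; the name softmax_probs and the shift by the maximum are my assumptions) of how the distribution that probability() computes could be obtained for all arms in one vectorized pass:

import numpy as np

def softmax_probs(values, temperature):
    # Same Boltzmann/softmax distribution that Model.probability(j) returns for each j,
    # but computed for all arms at once. Subtracting the maximum before exponentiating
    # avoids overflow at small temperatures and does not change the probabilities.
    z = np.asarray(values, dtype=float) / temperature
    z -= z.max()
    e = np.exp(z)
    return e / e.sum()

# e.g. model.p = softmax_probs(model.values, model.temperature)

Since probability(j) itself loops over all arms to build the denominator, recomputing every arm's probability with it costs O(K^2) per round; a vectorized helper like the one above would bring the full update back to O(K).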
In the lines inside train() where self.p is updated (marked Option 1 and Option 2 in the code), I wrote the arm-selection probability in two ways. The first, the uncommented for loop, recomputes every arm's probability from the formula after each pull, which seems more reasonable to me. The second, which I commented out, only updates the probability of the arm that was just pulled. In my experiments the two give roughly the same results, so I would like to ask which way it should actually be written.
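To make the two variants concrete, here is a small standalone example (the numbers are made up for illustration, and softmax_probs is the helper sketched above) of what each one does to the distribution after a single pull:

import numpy as np

def softmax_probs(values, temperature):
    z = np.asarray(values, dtype=float) / temperature
    z -= z.max()
    e = np.exp(z)
    return e / e.sum()

temperature = 0.1
values = np.array([0.2, 0.5, 0.3])       # made-up average rewards for 3 arms
p = softmax_probs(values, temperature)    # distribution before the pull

values[1] = 0.6                           # the pulled arm's average reward changes

# Option 1: recompute every entry (the uncommented for loop)
p_full = softmax_probs(values, temperature)

# Option 2: recompute only the pulled arm's entry (the commented-out line);
# probability(index) uses the new values, so this entry matches p_full[1],
# while the other entries are left as they were before the pull
p_partial = p.copy()
p_partial[1] = p_full[1]

print(p_full.sum(), p_partial.sum())      # 1.0 vs. a sum that is no longer exactly 1

(random.choices only needs relative weights rather than a normalized distribution, which is presumably why both versions still ran and gave similar-looking results in my experiments.)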