Reinforcement Learning: Actor-Critic Method
Actor-Critic Method
As the agent takes actions and moves through an environment, it maps the observed environment state to two outputs:
- Recommended action: a probability value for each action in the action space. The part of the agent responsible for this output is called the actor.
- Estimated future rewards: the sum of all rewards it expects to receive in the future. The part of the agent responsible for this output is the critic.
The actor and the critic learn to perform their tasks so that the actions recommended by the actor maximize the rewards.
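The training code below relies on Actor and Critic network classes that the post never shows. The following is a minimal PaddlePaddle sketch consistent with how they are used later (the actor returns a paddle.distribution.Categorical over actions, the critic a scalar state value); the two-layer architecture and the hidden size of 128 are assumptions, not the author's original definitions:

import paddle
import paddle.nn as nn
import paddle.nn.functional as F
from paddle.distribution import Categorical

class Actor(nn.Layer):
    # Policy network: maps a state to a distribution over actions.
    def __init__(self, state_size, action_size, hidden_size=128):  # hidden_size is an assumption
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, action_size)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        # Paddle's Categorical accepts (unnormalized) probabilities,
        # so the softmax output can be passed to it directly.
        probs = F.softmax(self.fc2(x), axis=-1)
        return Categorical(probs)

class Critic(nn.Layer):
    # Value network: maps a state to a scalar estimate of expected return.
    # action_size is unused but kept to match the constructor call below.
    def __init__(self, state_size, action_size, hidden_size=128):
        super().__init__()
        self.fc1 = nn.Linear(state_size, hidden_size)
        self.fc2 = nn.Linear(hidden_size, 1)

    def forward(self, state):
        x = F.relu(self.fc1(state))
        return self.fc2(x)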
Training the Model
import os
from itertools import count

import gym
import paddle
import paddle.optimizer as optim

def compute_returns(next_value, rewards, masks, gamma=0.99):
    # Walk backwards through the episode, accumulating discounted returns.
    # masks[step] is 0 at episode termination, which stops the bootstrap
    # from leaking across the episode boundary.
    R = next_value
    returns = []
    for step in reversed(range(len(rewards))):
        R = rewards[step] + gamma * R * masks[step]
        returns.insert(0, R)
    return returns
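compute_returns implements the standard discounted, bootstrapped return. Starting from the critic's value estimate of the state reached after the last recorded step, it walks the episode backwards:

$$R_t = r_t + \gamma \, m_t \, R_{t+1},$$

where the mask \(m_t\) is 0 at the step where the episode terminates (so no value is bootstrapped past the end) and 1 otherwise.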
def trainIters(actor, critic, n_iters, lr=1e-3):
    # The learning rate is not given in the original post; 1e-3 is an assumed default.
    optimizerA = optim.Adam(learning_rate=lr, parameters=actor.parameters())
    optimizerC = optim.Adam(learning_rate=lr, parameters=critic.parameters())
    for iter in range(n_iters):
        state = env.reset()
        log_probs = []
        values = []
        rewards = []
        masks = []
        for i in count():
            # env.render()
            state = paddle.to_tensor(state, dtype="float32", place=device)
            dist, value = actor(state), critic(state)
            action = dist.sample([1])
            # gym < 0.26 step API: (obs, reward, done, info)
            next_state, reward, done, _ = env.step(action.cpu().squeeze(0).numpy())
            log_prob = dist.log_prob(action)
            log_probs.append(log_prob)
            values.append(value)
            rewards.append(paddle.to_tensor([reward], dtype="float32", place=device))
            masks.append(paddle.to_tensor([1 - done], dtype="float32", place=device))
            state = next_state
            if done:
                if iter % 10 == 0:
                    print('Iteration: {}, Score: {}'.format(iter, i))
                break
        # Bootstrap from the critic's value of the final state, then
        # compute discounted returns for the whole episode.
        next_state = paddle.to_tensor(next_state, dtype="float32", place=device)
        next_value = critic(next_state)
        returns = compute_returns(next_value, rewards, masks)
        log_probs = paddle.concat(log_probs)
        returns = paddle.concat(returns).detach()
        values = paddle.concat(values)
        # Advantage: how much better the realized return was than the critic's prediction.
        advantage = returns - values
        actor_loss = -(log_probs * advantage.detach()).mean()
        critic_loss = advantage.pow(2).mean()
        optimizerA.clear_grad()
        optimizerC.clear_grad()
        actor_loss.backward()
        critic_loss.backward()
        optimizerA.step()
        optimizerC.step()
    os.makedirs('model', exist_ok=True)
    paddle.save(actor.state_dict(), 'model/actor.pdparams')
    paddle.save(critic.state_dict(), 'model/critic.pdparams')
    env.close()
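With the advantage defined as the gap between the realized return and the critic's prediction, the two losses computed above are

$$A_t = R_t - V(s_t), \qquad L_{\text{actor}} = -\frac{1}{T}\sum_{t} \log \pi(a_t \mid s_t)\, A_t, \qquad L_{\text{critic}} = \frac{1}{T}\sum_{t} A_t^2.$$

The advantage is detached in the actor loss so that the policy gradient does not backpropagate into the critic; each network is then updated by its own optimizer.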
if __name__ == '__main__':
    # The environment setup is not shown in the original post. CartPole-v0
    # (episodes capped at 200 steps) is assumed here, based on the reported
    # scores plateauing at 199.
    env = gym.make('CartPole-v0')
    state_size = env.observation_space.shape[0]
    action_size = env.action_space.n
    device = paddle.get_device()

    actor = Actor(state_size, action_size)
    if os.path.exists('model/actor.pdparams'):
        actor.set_state_dict(paddle.load('model/actor.pdparams'))
        print('Actor Model loaded')

    critic = Critic(state_size, action_size)
    if os.path.exists('model/critic.pdparams'):
        critic.set_state_dict(paddle.load('model/critic.pdparams'))
        print('Critic Model loaded')

    trainIters(actor, critic, n_iters=201)
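Once training finishes, a quick sanity check is to roll out the saved policy. The helper below is not part of the original post; it is a minimal sketch that samples actions from the trained actor in the same assumed CartPole-v0 environment:

def evaluate(actor, episodes=5):
    # Roll out the current policy, sampling actions from the actor's
    # distribution (the same interface the training loop uses).
    env = gym.make('CartPole-v0')
    for ep in range(episodes):
        state = env.reset()
        total_reward = 0.0
        done = False
        while not done:
            state_t = paddle.to_tensor(state, dtype="float32")
            dist = actor(state_t)
            action = int(dist.sample([1]).numpy()[0])
            state, reward, done, _ = env.step(action)
            total_reward += reward
        print('Episode {}: reward {}'.format(ep, total_reward))
    env.close()

If training converged as in the log below, calling evaluate(actor) should report episode rewards near the 200-step cap.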
Results
Iteration: 0, Score: 9
Iteration: 10, Score: 13
Iteration: 20, Score: 22
Iteration: 30, Score: 20
Iteration: 40, Score: 19
Iteration: 50, Score: 21
Iteration: 60, Score: 19
Iteration: 70, Score: 49
Iteration: 80, Score: 41
Iteration: 90, Score: 41
Iteration: 100, Score: 75
Iteration: 110, Score: 199
Iteration: 120, Score: 199
Iteration: 130, Score: 199
Iteration: 140, Score: 199
Iteration: 150, Score: 147
Iteration: 160, Score: 194
Iteration: 170, Score: 199
Iteration: 180, Score: 199
Iteration: 190, Score: 199
Iteration: 200, Score: 199
Source: https://blog.csdn.net/a1251726152/article/details/120888888