其他分享
首页 > 其他分享> > log_prob (custom used in RL)

log_prob (custom used in RL)

作者:互联网

def log_prob(self, value, pre_tanh_value=None):
    """Log-density of a tanh-squashed Gaussian sample.

    Applies the change-of-variables correction for ``value = tanh(u)`` where
    ``u ~ self.normal``:  log p(value) = log N(u) - log(1 - tanh(u)^2).
    ``self.epsilon`` is added inside the log for numerical stability when
    ``value`` is close to +/-1.

    :param value: the squashed sample x, expected in (-1, 1)
    :param pre_tanh_value: optionally the pre-squash value arctanh(x);
        recomputed via ``self.atanh`` when not supplied
    :return: elementwise log-probability tensor (same shape as ``value``)
    """
    if pre_tanh_value is None:
        # Recover u = arctanh(x); passing it in avoids this (lossy) inversion.
        pre_tanh_value = self.atanh(value)

    # log N(u) minus the log-determinant of the tanh Jacobian, 1 - tanh(u)^2.
    return self.normal.log_prob(pre_tanh_value) - torch.log(
        1 - value * value + self.epsilon
    )


###################################################################

def forward(self, obs, reparameterize=True, return_log_prob=True):
    """Sample an action from the tanh-Normal policy for observation ``obs``.

    :param obs: observation batch passed to ``self.actor``
    :param reparameterize: if True, use ``rsample`` (reparameterization trick,
        gradients flow through the sample); otherwise use ``sample``
    :param return_log_prob: if True, also compute the action log-probability
    :return: ``(action, log_prob)`` where ``log_prob`` is a
        ``(batch, 1)`` tensor, or ``None`` when ``return_log_prob`` is False
    """
    log_prob = None

    tanh_normal = self.actor(obs, reparameterize=reparameterize)

    if return_log_prob:
        # Ask the distribution for the pre-tanh value too, so log_prob can
        # skip the numerically lossy arctanh inversion.
        if reparameterize is True:
            action, pre_tanh_value = tanh_normal.rsample(
                return_pretanh_value=True
            )
        else:
            action, pre_tanh_value = tanh_normal.sample(
                return_pretanh_value=True
            )

        log_prob = tanh_normal.log_prob(
            action,
            pre_tanh_value=pre_tanh_value
        )
        # Sum per-dimension log-probs into one joint log-prob per sample.
        log_prob = log_prob.sum(dim=1, keepdim=True)
    else:
        if reparameterize is True:
            action = tanh_normal.rsample()
        else:
            action = tanh_normal.sample()
    return action, log_prob



from:offlinerl/neorl

标签:pre,log,tanh,value,custom,RL,True,prob
来源: https://www.cnblogs.com/leifzhang/p/16200628.html