From 7ec439206853ec1a7ba74cfeeaa15d45d30475cc Mon Sep 17 00:00:00 2001 From: gui11aume Date: Wed, 26 Apr 2023 14:45:08 +0000 Subject: [PATCH] Remove in-place operations in NegativeBinomial (#96748) This is a suggestion for a minor modification. The line `log_normalization[self.total_count + value == 0.] = 0.` prevents JIT compilation when the condition occurs, with the error message `RuntimeError: a view of a leaf Variable that requires grad is being used in an in-place operation.` I propose an alternative that does not involve in-place operations. It uses `masked_fill()` to replace the log normalization with 0 wherever `self.total_count + value == 0.`, while leaving all other entries unchanged. Because this produces a new tensor instead of assigning into a view, it avoids the in-place operation and therefore allows JIT compilation. Pull Request resolved: https://github.com/pytorch/pytorch/pull/96748 Approved by: https://github.com/fritzo, https://github.com/soulitzer --- torch/distributions/negative_binomial.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/torch/distributions/negative_binomial.py b/torch/distributions/negative_binomial.py index 36ea72da374..1fdbd85488c 100644 --- a/torch/distributions/negative_binomial.py +++ b/torch/distributions/negative_binomial.py @@ -101,6 +101,9 @@ class NegativeBinomial(Distribution): log_normalization = (-torch.lgamma(self.total_count + value) + torch.lgamma(1. + value) + torch.lgamma(self.total_count)) - log_normalization[self.total_count + value == 0.] = 0. + # The case self.total_count == 0 and value == 0 has probability 1 but + # lgamma(0) is infinite. Handle this case separately using a function + # that does not modify tensors in place to allow Jit compilation. + log_normalization = log_normalization.masked_fill(self.total_count + value == 0., 0.) return log_unnormalized_prob - log_normalization