-
Notifications
You must be signed in to change notification settings - Fork 5.9k
Fix error message of multinomial op #27946
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 8 commits
9128de9
74ed1ee
2144fb6
e6e54ea
1bb315b
8f35154
d5fe719
f616d5b
994713c
01f331d
8250e58
0b7f5cf
9a3b6e4
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -21,6 +21,7 @@ limitations under the License. */ | |
| #include "paddle/fluid/framework/op_registry.h" | ||
| #include "paddle/fluid/framework/operator.h" | ||
| #include "paddle/fluid/operators/multinomial_op.h" | ||
| #include "paddle/fluid/platform/enforce.h" | ||
| #include "paddle/fluid/platform/transform.h" | ||
|
|
||
| namespace paddle { | ||
|
|
@@ -31,6 +32,16 @@ __global__ void NormalizeProbability(T* norm_probs, const T* in_data, | |
| T* sum_rows) { | ||
| int id = threadIdx.x + blockIdx.x * blockDim.x + | ||
| blockIdx.y * gridDim.x * blockDim.x; | ||
| PADDLE_ENFORCE( | ||
| in_data[id] >= 0.0, | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 建议报错信息统一加句点,PR里有的加了,有的没加
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. done |
||
| "The input of multinomial distribution should be >= 0, but got %f", | ||
| in_data[id]); | ||
| PADDLE_ENFORCE(in_data[id] != INFINITY, | ||
|
||
| "The input of multinomial distribution should not be infinity"); | ||
| PADDLE_ENFORCE(in_data[id] != NAN, | ||
| "The input of multinomial distribution should not be NaN"); | ||
| PADDLE_ENFORCE(sum_rows[blockIdx.y] > 0.0, | ||
| "The sum of input should not be 0"); | ||
|
||
| norm_probs[id] = in_data[id] / sum_rows[blockIdx.y]; | ||
| } | ||
|
|
||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -46,13 +46,18 @@ void MultinomialFunctor(int64_t* out_data, const T* in_data, | |
| prob_value = in_data[i * num_categories + j]; | ||
| PADDLE_ENFORCE_GE( | ||
| prob_value, 0.0, | ||
| platform::errors::OutOfRange("The input of multinomial distribution " | ||
| "should be >= 0, but got %f", | ||
| prob_value)); | ||
| PADDLE_ENFORCE_EQ( | ||
| std::isinf(static_cast<double>(prob_value)), false, | ||
| platform::errors::OutOfRange( | ||
| "The input of multinomial distribution should be >= 0")); | ||
| PADDLE_ENFORCE_EQ((std::isinf(static_cast<double>(prob_value)) || | ||
| std::isnan(static_cast<double>(prob_value))), | ||
| false, platform::errors::OutOfRange( | ||
| "The input of multinomial distribution " | ||
| "should not be infinity or NaN")); | ||
| "The input of multinomial distribution should not be infinity")); | ||
| PADDLE_ENFORCE_EQ( | ||
| std::isnan(static_cast<double>(prob_value)), false, | ||
| platform::errors::OutOfRange( | ||
| "The input of multinomial distribution should not be NaN")); | ||
|
||
|
|
||
| probs_sum += prob_value; | ||
| if (prob_value == 0) { | ||
| num_zeros += 1; | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -662,48 +662,50 @@ class Categorical(Distribution): | |
|
|
||
| Args: | ||
| logits(list|numpy.ndarray|Tensor): The logits input of categorical distribution. The data type is float32 or float64. | ||
| name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. | ||
|
|
||
| Examples: | ||
| .. code-block:: python | ||
|
|
||
| import paddle | ||
| from paddle.distribution import Categorical | ||
| import paddle | ||
| from paddle.distribution import Categorical | ||
|
|
||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
| y = paddle.rand([6]) | ||
| print(y.numpy()) | ||
| # [0.6365463 , 0.7278677 , 0.90260243, | ||
| # 0.5226815 , 0.35837543, 0.13981032] | ||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
| y = paddle.rand([6]) | ||
| print(y.numpy()) | ||
| # [0.6365463 , 0.7278677 , 0.90260243, | ||
| # 0.5226815 , 0.35837543, 0.13981032] | ||
|
|
||
| cat = Categorical(x) | ||
| cat2 = Categorical(y) | ||
| cat = Categorical(x) | ||
| cat2 = Categorical(y) | ||
|
|
||
| cat.sample([2,3]) | ||
| # [[5, 1, 1], | ||
| # [0, 1, 2]] | ||
| cat.sample([2,3]) | ||
| # [[5, 1, 1], | ||
| # [0, 1, 2]] | ||
|
|
||
| cat.entropy() | ||
| # [1.71887] | ||
| cat.entropy() | ||
| # [1.71887] | ||
|
|
||
| cat.kl_divergence(cat2) | ||
| # [0.0278455] | ||
| cat.kl_divergence(cat2) | ||
| # [0.0278455] | ||
|
|
||
| value = paddle.to_tensor([2,1,3]) | ||
| cat.probs(value) | ||
| # [0.341613 0.342648 0.03123] | ||
| value = paddle.to_tensor([2,1,3]) | ||
| cat.probs(value) | ||
| # [0.341613 0.342648 0.03123] | ||
|
|
||
| cat.log_prob(value) | ||
| # [-1.07408 -1.07105 -3.46638] | ||
| cat.log_prob(value) | ||
| # [-1.07408 -1.07105 -3.46638] | ||
|
|
||
| """ | ||
|
|
||
| def __init__(self, logits, name=None): | ||
| """ | ||
| Args: | ||
| logits(list|numpy.ndarray|Variable): The logits input of categorical distribution. The data type is float32 or float64. | ||
| logits(list|numpy.ndarray|Tensor): The logits input of categorical distribution. The data type is float32 or float64. | ||
| name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. | ||
| """ | ||
| if not in_dygraph_mode(): | ||
| check_type(logits, 'logits', (np.ndarray, tensor.Variable, list), | ||
|
|
@@ -727,27 +729,27 @@ def sample(self, shape): | |
| """Generate samples of the specified shape. | ||
|
|
||
| Args: | ||
| shape (list): Shape of the generated samples. | ||
| shape (list): Shape of the generated samples. | ||
|
|
||
| Returns: | ||
| Tensor: A tensor with prepended dimensions shape. | ||
| Tensor: A tensor with prepended dimensions shape. | ||
|
|
||
| Examples: | ||
| .. code-block:: python | ||
| .. code-block:: python | ||
|
|
||
| import paddle | ||
| from paddle.distribution import Categorical | ||
| import paddle | ||
| from paddle.distribution import Categorical | ||
|
|
||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
|
|
||
| cat = Categorical(x) | ||
| cat = Categorical(x) | ||
|
|
||
| cat.sample([2,3]) | ||
| # [[5, 1, 1], | ||
| # [0, 1, 2]] | ||
| cat.sample([2,3]) | ||
| # [[5, 1, 1], | ||
| # [0, 1, 2]] | ||
|
|
||
| """ | ||
| name = self.name + '_sample' | ||
|
|
@@ -775,28 +777,28 @@ def kl_divergence(self, other): | |
| other (Categorical): instance of Categorical. The data type is float32. | ||
|
|
||
| Returns: | ||
| Variable: kl-divergence between two Categorical distributions. | ||
| Tensor: kl-divergence between two Categorical distributions. | ||
|
|
||
| Examples: | ||
| .. code-block:: python | ||
| .. code-block:: python | ||
|
|
||
| import paddle | ||
| from paddle.distribution import Categorical | ||
| import paddle | ||
| from paddle.distribution import Categorical | ||
|
|
||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
| y = paddle.rand([6]) | ||
| print(y.numpy()) | ||
| # [0.6365463 , 0.7278677 , 0.90260243, | ||
| # 0.5226815 , 0.35837543, 0.13981032] | ||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
| y = paddle.rand([6]) | ||
| print(y.numpy()) | ||
| # [0.6365463 , 0.7278677 , 0.90260243, | ||
| # 0.5226815 , 0.35837543, 0.13981032] | ||
|
|
||
| cat = Categorical(x) | ||
| cat2 = Categorical(y) | ||
| cat = Categorical(x) | ||
| cat2 = Categorical(y) | ||
|
|
||
| cat.kl_divergence(cat2) | ||
| # [0.0278455] | ||
| cat.kl_divergence(cat2) | ||
| # [0.0278455] | ||
|
|
||
| """ | ||
| name = self.name + '_kl_divergence' | ||
|
|
@@ -823,23 +825,23 @@ def entropy(self): | |
| """Shannon entropy in nats. | ||
|
|
||
| Returns: | ||
| Variable: Shannon entropy of Categorical distribution. The data type is float32. | ||
| Tensor: Shannon entropy of Categorical distribution. The data type is float32. | ||
|
|
||
| Examples: | ||
| .. code-block:: python | ||
| .. code-block:: python | ||
|
|
||
| import paddle | ||
| from paddle.distribution import Categorical | ||
| import paddle | ||
| from paddle.distribution import Categorical | ||
|
|
||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
|
|
||
| cat = Categorical(x) | ||
| cat = Categorical(x) | ||
|
|
||
| cat.entropy() | ||
| # [1.71887] | ||
| cat.entropy() | ||
| # [1.71887] | ||
|
|
||
| """ | ||
| name = self.name + '_entropy' | ||
|
|
@@ -864,27 +866,27 @@ def probs(self, value): | |
| with ``logits``. That is, ``value[:-1] = logits[:-1]``. | ||
|
|
||
| Args: | ||
| value (Tensor): The input tensor represents the selected category index. | ||
| value (Tensor): The input tensor represents the selected category index. | ||
|
|
||
| Returns: | ||
| Tensor: probability according to the category index. | ||
| Tensor: probability according to the category index. | ||
|
|
||
| Examples: | ||
| .. code-block:: python | ||
| .. code-block:: python | ||
|
|
||
| import paddle | ||
| from paddle.distribution import Categorical | ||
| import paddle | ||
| from paddle.distribution import Categorical | ||
|
|
||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
|
|
||
| cat = Categorical(x) | ||
| cat = Categorical(x) | ||
|
|
||
| value = paddle.to_tensor([2,1,3]) | ||
| cat.probs(value) | ||
| # [0.341613 0.342648 0.03123] | ||
| value = paddle.to_tensor([2,1,3]) | ||
| cat.probs(value) | ||
| # [0.341613 0.342648 0.03123] | ||
|
|
||
| """ | ||
| name = self.name + '_probs' | ||
|
|
@@ -929,28 +931,28 @@ def log_prob(self, value): | |
| """Log probabilities of the given category. Refer to ``probs`` method. | ||
|
|
||
| Args: | ||
| value (Tensor): The input tensor represents the selected category index. | ||
| value (Tensor): The input tensor represents the selected category index. | ||
|
|
||
| Returns: | ||
| Tensor: Log probability. | ||
| Tensor: Log probability. | ||
|
|
||
| Examples: | ||
| .. code-block:: python | ||
| .. code-block:: python | ||
|
|
||
| import paddle | ||
| from paddle.distribution import Categorical | ||
| import paddle | ||
| from paddle.distribution import Categorical | ||
|
|
||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
| # 0.09053693, 0.30820143, 0.19095989] | ||
| x = paddle.rand([6]) | ||
| print(x.numpy()) | ||
| # [0.32564053, 0.99334985, 0.99034804, | ||
|
||
| # 0.09053693, 0.30820143, 0.19095989] | ||
|
|
||
| cat = Categorical(x) | ||
| cat = Categorical(x) | ||
|
|
||
| value = paddle.to_tensor([2,1,3]) | ||
| value = paddle.to_tensor([2,1,3]) | ||
|
|
||
| cat.log_prob(value) | ||
| # [-1.07408 -1.07105 -3.46638] | ||
| cat.log_prob(value) | ||
| # [-1.07408 -1.07105 -3.46638] | ||
|
|
||
| """ | ||
| name = self.name + '_log_prob' | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Similar for the others.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
done