From 7afcd9cda291e2b43f078cd9d97c572c5d5b8633 Mon Sep 17 00:00:00 2001
From: Albert Villanova del Moral <8515462+albertvillanova@users.noreply.github.com>
Date: Wed, 30 Jun 2021 09:40:06 +0200
Subject: [PATCH 1/2] Minor fix docs format for bertscore

---
 metrics/bertscore/bertscore.py | 55 ++++++++++++++++++----------------
 1 file changed, 30 insertions(+), 25 deletions(-)

diff --git a/metrics/bertscore/bertscore.py b/metrics/bertscore/bertscore.py
index ac14d99b8fe..0a89782b1fe 100644
--- a/metrics/bertscore/bertscore.py
+++ b/metrics/bertscore/bertscore.py
@@ -30,40 +30,45 @@
 """
 
 _DESCRIPTION = """\
-BERTScore leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference sentences by cosine similarity.
+BERTScore leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference 
+sentences by cosine similarity.
 It has been shown to correlate with human judgment on sentence-level and system-level evaluation.
-Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language generation tasks.
+Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language 
+generation tasks.
 
-See the [README.md] file at https://github.com/Tiiiger/bert_score for more information.
+See the `README.md` file at [https://github.com/Tiiiger/bert_score](https://github.com/Tiiiger/bert_score) for more 
+information.
 """
 
 _KWARGS_DESCRIPTION = """
 BERTScore Metrics with the hashcode from a source against one or more references.
 
 Args:
-    `predictions` (list of str): prediction/candidate sentences
-    `references` (list of str or list of list of str): reference sentences
-    `lang` (str): language of the sentences; required (e.g. 'en')
-    `model_type` (str): bert specification, default using the suggested
-        model for the target language; has to specify at least one of
-        `model_type` or `lang`
-    `num_layers` (int): the layer of representation to use.
-        default using the number of layers tuned on WMT16 correlation data
-    `verbose` (bool): turn on intermediate status update
-    `idf` (bool or dict): use idf weighting, can also be a precomputed idf_dict
-    `device` (str): on which the contextual embedding model will be allocated on.
-        If this argument is None, the model lives on cuda:0 if cuda is available.
-    `nthreads` (int): number of threads
-    `batch_size` (int): bert score processing batch size
-        at least one of `model_type` or `lang`. `lang` needs to be
-        specified when `rescale_with_baseline` is True.
-    `rescale_with_baseline` (bool): rescale bertscore with pre-computed baseline
-    `baseline_path` (str): customized baseline file.
+    predictions (list of str): Prediction/candidate sentences.
+    references (list of str or list of list of str): Reference sentences.
+    lang (str): Language of the sentences; required (e.g. 'en').
+    model_type (str): Bert specification, default using the suggested
+        model for the target language; has to specify at least one of
+        `model_type` or `lang`.
+    num_layers (int): The layer of representation to use,
+        default using the number of layers tuned on WMT16 correlation data.
+    verbose (bool): Turn on intermediate status update.
+    idf (bool or dict): Use idf weighting; can also be a precomputed idf_dict.
+    device (str): On which the contextual embedding model will be allocated on.
+        If this argument is None, the model lives on cuda:0 if cuda is available.
+    nthreads (int): Number of threads.
+    batch_size (int): Bert score processing batch size,
+        at least one of `model_type` or `lang`. `lang` needs to be
+        specified when `rescale_with_baseline` is True.
+    rescale_with_baseline (bool): Rescale bertscore with pre-computed baseline.
+    baseline_path (str): Customized baseline file.
+
 Returns:
-    'precision': Precision,
-    'recall': Recall,
-    'f1', F1 score,
-    'hashcode': Hashcode of the library,
+    precision: Precision.
+    recall: Recall.
+    f1: F1 score.
+    hashcode: Hashcode of the library.
+
 Examples:
 
     >>> predictions = ["hello there", "general kenobi"]

From 67c31780cb21b4bfaf8bdd4f8532f058204e6528 Mon Sep 17 00:00:00 2001
From: Albert Villanova del Moral <8515462+albertvillanova@users.noreply.github.com>
Date: Wed, 30 Jun 2021 09:52:06 +0200
Subject: [PATCH 2/2] Fix style

---
 metrics/bertscore/bertscore.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/metrics/bertscore/bertscore.py b/metrics/bertscore/bertscore.py
index 0a89782b1fe..224cb581cbc 100644
--- a/metrics/bertscore/bertscore.py
+++ b/metrics/bertscore/bertscore.py
@@ -30,13 +30,13 @@
 """
 
 _DESCRIPTION = """\
-BERTScore leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference 
+BERTScore leverages the pre-trained contextual embeddings from BERT and matches words in candidate and reference
 sentences by cosine similarity.
 It has been shown to correlate with human judgment on sentence-level and system-level evaluation.
-Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language 
+Moreover, BERTScore computes precision, recall, and F1 measure, which can be useful for evaluating different language
 generation tasks.
 
-See the `README.md` file at [https://github.com/Tiiiger/bert_score](https://github.com/Tiiiger/bert_score) for more 
+See the `README.md` file at [https://github.com/Tiiiger/bert_score](https://github.com/Tiiiger/bert_score) for more
 information.
 """
 