
Commit c23cbdf

Fix docstrings with last version of hf-doc-builder styler (#18581)
* Fix docstrings with last version of hf-doc-builder styler
* Remove empty Parameter block
1 parent 42b8940 · commit c23cbdf

16 files changed: +0 −30 lines
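For context, the deletions below all follow the same pattern: the hf-doc-builder styler expects an `Args:`/`Parameters:` heading to be followed directly by the first entry, with no blank line in between, and headings with no entries are dropped outright. Below is a minimal, hypothetical docstring (not taken from this commit) written in the shape the styler accepts:

```python
def scale(value: float, factor: float = 2.0) -> float:
    """
    Multiplies `value` by `factor`.

    Args:
        value (`float`):
            The number to scale.
        factor (`float`, *optional*, defaults to 2.0):
            The multiplier. No blank line separates this block from the `Args:` heading above.
    """
    return value * factor
```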

src/transformers/benchmark/benchmark_utils.py

Lines changed: 0 additions & 5 deletions
@@ -79,7 +79,6 @@ def separate_process_wrapper_fn(func: Callable[[], None], do_multi_processing: b
 measurements it is important that the function is executed in a separate process
 
 Args:
-
 - `func`: (`callable`): function() -> ... generic function which will be executed in its own separate process
 - `do_multi_processing`: (`bool`) Whether to run function on separate process or not
 """
@@ -210,7 +209,6 @@ def measure_peak_memory_cpu(function: Callable[[], None], interval=0.5, device_i
 https://github.com/pythonprofilers/memory_profiler/blob/895c4ac7a08020d66ae001e24067da6dcea42451/memory_profiler.py#L239
 
 Args:
-
 - `function`: (`callable`): function() -> ... function without any arguments to measure for which to measure
     the peak memory
@@ -228,7 +226,6 @@ def get_cpu_memory(process_id: int) -> int:
 measures current cpu memory usage of a given `process_id`
 
 Args:
-
 - `process_id`: (`int`) process_id for which to measure memory
 
 Returns
@@ -336,7 +333,6 @@ def start_memory_tracing(
 https://psutil.readthedocs.io/en/latest/#psutil.Process.memory_info
 
 Args:
-
 - `modules_to_trace`: (None, string, list/tuple of string) if None, all events are recorded if string or list
     of strings: only events from the listed module/sub-module will be recorded (e.g. 'fairseq' or
     'transformers.models.gpt2.modeling_gpt2')
@@ -483,7 +479,6 @@ def stop_memory_tracing(
 Stop memory tracing cleanly and return a summary of the memory trace if a trace is given.
 
 Args:
-
 `memory_trace` (optional output of start_memory_tracing, default: None):
     memory trace to convert in summary
 `ignore_released_memory` (boolean, default: None):
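The two tracing helpers documented above pair up: `start_memory_tracing` returns a trace and `stop_memory_tracing` turns it into a summary. A rough usage sketch, assuming both functions are importable from `transformers.benchmark.benchmark_utils` (the module edited here) and using only arguments named in the docstrings above:

```python
from transformers.benchmark.benchmark_utils import start_memory_tracing, stop_memory_tracing

# Record memory events, restricted to the GPT-2 modeling module as in the docstring example
trace = start_memory_tracing(modules_to_trace="transformers.models.gpt2.modeling_gpt2")

# ... run the code to profile here ...

# Convert the recorded events into a summary; no specific summary fields are assumed here,
# printing the object is enough for a quick look.
summary = stop_memory_tracing(trace)
print(summary)
```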

src/transformers/generation_flax_utils.py

Lines changed: 0 additions & 1 deletion
@@ -208,7 +208,6 @@ def generate(
 post](https://huggingface.co/blog/how-to-generate).
 
 Parameters:
-
 input_ids (`jnp.ndarray` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 max_length (`int`, *optional*, defaults to `model.config.max_length`):

src/transformers/generation_tf_utils.py

Lines changed: 0 additions & 5 deletions
@@ -418,7 +418,6 @@ def generate(
 post](https://huggingface.co/blog/how-to-generate).
 
 Parameters:
-
 input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`, `(batch_size, sequence_length,
     feature_dim)` or `(batch_size, num_channels, height, width)`, *optional*):
     The sequence used as a prompt for the generation or as model inputs to the encoder. If `None` the
@@ -1336,7 +1335,6 @@ def _generate(
 post](https://huggingface.co/blog/how-to-generate).
 
 Parameters:
-
 input_ids (`tf.Tensor` of `dtype=tf.int32` and shape `(batch_size, sequence_length)`, *optional*):
     The sequence used as a prompt for the generation. If `None` the method initializes it with
     `bos_token_id` and a batch size of 1.
@@ -2070,7 +2068,6 @@ def greedy_search(
 Generates sequences for models with a language modeling head using greedy decoding.
 
 Parameters:
-
 input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 logits_processor (`TFLogitsProcessorList`, *optional*):
@@ -2323,7 +2320,6 @@ def sample(
 Generates sequences for models with a language modeling head using multinomial sampling.
 
 Parameters:
-
 input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 logits_processor (`TFLogitsProcessorList`, *optional*):
@@ -2600,7 +2596,6 @@ def beam_search(
 Generates sequences for models with a language modeling head using beam search with multinomial sampling.
 
 Parameters:
-
 input_ids (`tf.Tensor` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 max_length (`int`, *optional*, defaults to 20):
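The docstrings above belong to the TF generation utilities, which are normally reached through the public `generate` call rather than invoked directly. A small sampling sketch using that public API (the checkpoint name is illustrative, not part of this commit):

```python
from transformers import AutoTokenizer, TFAutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = TFAutoModelForCausalLM.from_pretrained("gpt2")

# input_ids: tf.Tensor of shape (batch_size, sequence_length), as described above
input_ids = tokenizer("The quick brown fox", return_tensors="tf").input_ids

# do_sample=True selects the multinomial-sampling path documented in this file
outputs = model.generate(input_ids, do_sample=True, max_length=30, top_k=50)
print(tokenizer.decode(outputs[0]))
```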

src/transformers/generation_utils.py

Lines changed: 0 additions & 5 deletions
@@ -1555,7 +1555,6 @@ def greedy_search(
 used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 logits_processor (`LogitsProcessorList`, *optional*):
@@ -1789,7 +1788,6 @@ def sample(
 can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 logits_processor (`LogitsProcessorList`, *optional*):
@@ -2046,7 +2044,6 @@ def beam_search(
 can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 beam_scorer (`BeamScorer`):
@@ -2355,7 +2352,6 @@ def beam_sample(
 sampling** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 beam_scorer (`BeamScorer`):
@@ -2672,7 +2668,6 @@ def group_beam_search(
 decoding** and can be used for text-decoder, text-to-text, speech-to-text, and vision-to-text models.
 
 Parameters:
-
 input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
     The sequence used as a prompt for the generation.
 beam_scorer (`BeamScorer`):
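Likewise for the PyTorch utilities above: `greedy_search` is the path `generate` dispatches to when sampling and beam search are disabled. A minimal sketch using the public `generate` call (checkpoint name illustrative):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("gpt2")
model = AutoModelForCausalLM.from_pretrained("gpt2")

# input_ids: torch.LongTensor of shape (batch_size, sequence_length), as in the docstrings above
input_ids = tokenizer("The quick brown fox", return_tensors="pt").input_ids

# num_beams=1 and do_sample=False select the greedy_search path documented here
outputs = model.generate(input_ids, max_length=30, num_beams=1, do_sample=False)
print(tokenizer.decode(outputs[0]))
```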

src/transformers/modelcard.py

Lines changed: 0 additions & 2 deletions
@@ -80,8 +80,6 @@ class ModelCard:
 Inioluwa Deborah Raji and Timnit Gebru for the proposal behind model cards. Link: https://arxiv.org/abs/1810.03993
 
 Note: A model card can be loaded and saved to disk.
-
-Parameters:
 """
 
 def __init__(self, **kwargs):
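The `ModelCard` docstring notes that a card can be loaded and saved to disk; the removed `Parameters:` heading had no entries because the class only takes `**kwargs`. A rough sketch of that round trip, assuming the long-standing `save_pretrained`/`from_pretrained` helpers on this class (the field values are illustrative):

```python
from transformers import ModelCard

# Attributes are populated from keyword arguments (see __init__(self, **kwargs) above)
card = ModelCard(model_details={"language": "en"}, intended_use={"primary_uses": "demo"})

card.save_pretrained("model_card.json")              # write the card to disk as JSON
restored = ModelCard.from_pretrained("model_card.json")  # load it back
```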

src/transformers/models/auto/auto_factory.py

Lines changed: 0 additions & 1 deletion
@@ -563,7 +563,6 @@ class _LazyAutoMapping(OrderedDict):
 " A mapping config to object (model or tokenizer for instance) that will load keys and values when it is accessed.
 
 Args:
-
 - config_mapping: The map model type to config class
 - model_mapping: The map model type to model (or tokenizer) class
 """

src/transformers/models/flaubert/tokenization_flaubert.py

Lines changed: 0 additions & 1 deletion
@@ -130,7 +130,6 @@ def _tokenize(self, text, bypass_tokenizer=False):
 - Install with `pip install sacremoses`
 
 Args:
-
 - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)
     (bool). If True, we only apply BPE.

src/transformers/models/fsmt/tokenization_fsmt.py

Lines changed: 0 additions & 1 deletion
@@ -354,7 +354,6 @@ def _tokenize(self, text, lang="en", bypass_tokenizer=False):
 - Install with `pip install sacremoses`
 
 Args:
-
 - lang: ISO language code (default = 'en') (string). Languages should belong of the model supported
     languages. However, we don't enforce it.
 - bypass_tokenizer: Allow users to preprocess and tokenize the sentences externally (default = False)

src/transformers/models/perceiver/modeling_perceiver.py

Lines changed: 0 additions & 1 deletion
@@ -1960,7 +1960,6 @@ def build_position_encoding(
 Builds the position encoding.
 
 Args:
-
 - out_channels: refers to the number of channels of the position encodings.
 - project_pos_dim: if specified, will project the position encodings to this dimension.

src/transformers/models/tapex/tokenization_tapex.py

Lines changed: 0 additions & 1 deletion
@@ -1398,7 +1398,6 @@ def truncate_table_rows(
 ):
 """
 Args:
-
 table_content:
     {"header": xxx, "rows": xxx, "id" (Optionally): xxx}
