@@ -673,12 +673,13 @@ def compile(
 coefficients.
 weighted_metrics: List of metrics to be evaluated and weighted by
 `sample_weight` or `class_weight` during training and testing.
-run_eagerly: Bool. Defaults to `False`. If `True`, this `Model`'s
-logic will not be wrapped in a `tf.function`. Recommended to leave
-this as `None` unless your `Model` cannot be run inside a
-`tf.function`. `run_eagerly=True` is not supported when using
-`tf.distribute.experimental.ParameterServerStrategy`.
-steps_per_execution: Int. Defaults to 1. The number of batches to
+run_eagerly: Bool. If `True`, this `Model`'s logic will not be
+wrapped in a `tf.function`. Recommended to leave this as `None`
+unless your `Model` cannot be run inside a `tf.function`.
+`run_eagerly=True` is not supported when using
+`tf.distribute.experimental.ParameterServerStrategy`. Defaults to
+`False`.
+steps_per_execution: Int. The number of batches to
 run during each `tf.function` call. Running multiple batches
 inside a single `tf.function` call can greatly improve performance
 on TPUs or small models with a large Python overhead. At most, one
@@ -687,7 +688,7 @@ def compile(
 the size of the epoch. Note that if `steps_per_execution` is set
 to `N`, `Callback.on_batch_begin` and `Callback.on_batch_end`
 methods will only be called every `N` batches (i.e. before/after
-each `tf.function` execution).
+each `tf.function` execution). Defaults to `1`.
 jit_compile: If `True`, compile the model training step with XLA.
 [XLA](https://www.tensorflow.org/xla) is an optimizing compiler
 for machine learning.
@@ -708,9 +709,10 @@ def compile(
 not process the same data. The number of shards should be at least
 the number of workers for good performance. A value of 'auto'
 turns on exact evaluation and uses a heuristic for the number of
-shards based on the number of workers. Defaults to 0, meaning no
+shards based on the number of workers. If set to 0, no
 visitation guarantee is provided. NOTE: Custom implementations of
 `Model.test_step` will be ignored when doing exact evaluation.
+Defaults to `0`.
 **kwargs: Arguments supported for backwards compatibility only.
 """
 if jit_compile and not tf_utils.can_jit_compile(warn=True):
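For reference, a minimal sketch of how the `compile` arguments touched above might be passed. The toy model and the particular values are hypothetical, not taken from this commit, and the sharded-evaluation argument from the last hunk is omitted since it only applies under `ParameterServerStrategy`:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(
    optimizer="adam",
    loss="mse",
    metrics=["mae"],
    run_eagerly=False,       # keep the train/test steps wrapped in a tf.function
    steps_per_execution=32,  # run up to 32 batches per tf.function call
    jit_compile=False,       # set to True to compile the training step with XLA
)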
@@ -1457,11 +1459,11 @@ def fit(
 of index `epochs` is reached.
 verbose: 'auto', 0, 1, or 2. Verbosity mode.
 0 = silent, 1 = progress bar, 2 = one line per epoch.
-'auto' defaults to 1 for most cases, but 2 when used with
+'auto' becomes 1 for most cases, but 2 when used with
 `ParameterServerStrategy`. Note that the progress bar is not
 particularly useful when logged to a file, so verbose=2 is
 recommended when not running interactively (eg, in a production
-environment).
+environment). Defaults to 'auto'.
 callbacks: List of `keras.callbacks.Callback` instances.
 List of callbacks to apply during training.
 See `tf.keras.callbacks`. Note
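A short sketch of the `verbose` behaviour documented above; the random data and toy model are made up for illustration:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer="adam", loss="mse")
x = np.random.rand(256, 8).astype("float32")
y = np.random.rand(256, 1).astype("float32")
# verbose=2 prints one line per epoch, which reads better than a progress bar
# when the output is redirected to a log file.
model.fit(x, y, epochs=3, batch_size=32, verbose=2)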
@@ -2059,11 +2061,11 @@ def evaluate(
 they generate batches).
 verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
 0 = silent, 1 = progress bar, 2 = single line.
-`"auto"` defaults to 1 for most cases, and to 2 when used with
+`"auto"` becomes 1 for most cases, and to 2 when used with
 `ParameterServerStrategy`. Note that the progress bar is not
 particularly useful when logged to a file, so `verbose=2` is
 recommended when not running interactively (e.g. in a production
-environment).
+environment). Defaults to `"auto"`.
 sample_weight: Optional Numpy array of weights for the test samples,
 used for weighting the loss function. You can either pass a flat
 (1D) Numpy array with the same length as the input samples
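Likewise for `evaluate`, a sketch with hypothetical data showing `verbose` and a flat `sample_weight` array together:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
x = np.random.rand(64, 8).astype("float32")
y = np.random.rand(64, 1).astype("float32")
weights = np.ones(64, dtype="float32")  # flat (1D) per-sample weights
loss, mae = model.evaluate(x, y, sample_weight=weights, verbose=0)  # silent run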
@@ -2419,11 +2421,11 @@ def predict(
 (since they generate batches).
 verbose: `"auto"`, 0, 1, or 2. Verbosity mode.
 0 = silent, 1 = progress bar, 2 = single line.
-`"auto"` defaults to 1 for most cases, and to 2 when used with
+`"auto"` becomes 1 for most cases, and to 2 when used with
 `ParameterServerStrategy`. Note that the progress bar is not
 particularly useful when logged to a file, so `verbose=2` is
 recommended when not running interactively (e.g. in a production
-environment).
+environment). Defaults to `"auto"`.
 steps: Total number of steps (batches of samples)
 before declaring the prediction round finished.
 Ignored with the default value of `None`. If x is a `tf.data`
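And for `predict`, where a silent run is often what you want in a batch job; the input shape and model are again hypothetical:

import numpy as np
import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
preds = model.predict(np.random.rand(16, 8).astype("float32"), verbose=0)
print(preds.shape)  # (16, 1)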
@@ -2958,7 +2960,7 @@ def save(self, filepath, overwrite=True, save_format=None, **kwargs):
 SavedModel format arguments:
 include_optimizer: Only applied to SavedModel and legacy HDF5
 formats. If False, do not save the optimizer state.
-Defaults to True.
+Defaults to `True`.
 signatures: Only applies to SavedModel format. Signatures to save
 with the SavedModel. See the `signatures` argument in
 `tf.saved_model.save` for details.
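A sketch of `save` with the `include_optimizer` flag described above; the paths are placeholders, and the second call assumes `h5py` is available for the legacy HDF5 format:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer="adam", loss="mse")
model.save("toy_savedmodel")                         # SavedModel directory
model.save("toy_model.h5", include_optimizer=False)  # legacy HDF5, drop optimizer state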
@@ -3051,7 +3053,7 @@ def save_weights(
 target location, or provide the user with a manual prompt.
 save_format: Either 'tf' or 'h5'. A `filepath` ending in '.h5' or
 '.keras' will default to HDF5 if `save_format` is `None`.
-Otherwise `None` defaults to 'tf'.
+Otherwise, `None` becomes 'tf'. Defaults to `None`.
 options: Optional `tf.train.CheckpointOptions` object that specifies
 options for saving weights.
 
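A sketch of how `save_format=None` is resolved by `save_weights`; the file names are placeholders:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.save_weights("toy_weights_ckpt")  # no '.h5'/'.keras' suffix, so 'tf' checkpoint format
model.save_weights("toy_weights.h5")    # '.h5' suffix, so HDF5 format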
@@ -3366,17 +3368,17 @@ def summary(
 (e.g. set this to adapt the display to different
 terminal window sizes).
 positions: Relative or absolute positions of log elements
-in each line. If not provided,
-defaults to `[0.3, 0.6, 0.70, 1.]`
+in each line. If not provided, becomes
+`[0.3, 0.6, 0.70, 1.]`. Defaults to `None`.
 print_fn: Print function to use. By default, prints to `stdout`.
 If `stdout` doesn't work in your environment, change to `print`.
 It will be called on each line of the summary.
 You can set it to a custom function
 in order to capture the string summary.
 expand_nested: Whether to expand the nested models.
-If not provided, defaults to `False`.
+Defaults to `False`.
 show_trainable: Whether to show if a layer is trainable.
-If not provided, defaults to `False`.
+Defaults to `False`.
 layer_range: a list or tuple of 2 strings,
 which is the starting layer name and ending layer name
 (both inclusive) indicating the range of layers to be printed
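A sketch of the `summary` options documented above, including the common trick of using `print_fn` to capture the table; the nested toy model is hypothetical:

import tensorflow as tf

inner = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))], name="inner")
model = tf.keras.Sequential([inner, tf.keras.layers.Dense(1)], name="outer")

lines = []
model.summary(
    line_length=80,
    expand_nested=True,   # also list the layers inside `inner`
    show_trainable=True,  # add a column flagging trainable layers
    # accept extra keyword arguments defensively, in case the print hook is
    # called with more than just the line of text
    print_fn=lambda line, **kwargs: lines.append(line),
)
print("\n".join(lines))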
@@ -3942,7 +3944,8 @@ def _get_compile_args(self, user_metrics=True):
 
 Args:
 user_metrics: Whether to return user-supplied metrics or `Metric`
-objects. Defaults to returning the user-supplied metrics.
+objects. If `True`, returns the user-supplied metrics.
+Defaults to `True`.
 
 Returns:
 Dictionary of arguments that were used when compiling the model.
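For context, `_get_compile_args` is a private helper, so the sketch below is illustrative only and may behave differently across versions; the toy model is hypothetical:

import tensorflow as tf

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(8,))])
model.compile(optimizer="adam", loss="mse", metrics=["mae"])
compile_args = model._get_compile_args(user_metrics=True)
print(sorted(compile_args))  # expected to include keys such as 'loss', 'metrics', 'optimizer'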
@@ -4186,11 +4189,11 @@ def _get_verbosity(verbose, distribute_strategy):
 distribute_strategy._should_use_with_coordinator
 or not io_utils.is_interactive_logging_enabled()
 ):
-# Default to epoch-level logging for PSStrategy or using absl
+# Defaults to epoch-level logging for PSStrategy or using absl
 # logging.
 return 2
 else:
-return 1  # Default to batch-level logging otherwise.
+return 1  # Defaults to batch-level logging otherwise.
 return verbose
 
 
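The hunk above only rewords comments, but the resolution rule it documents is easy to restate. Below is a standalone reimplementation for illustration, not the library function itself:

def resolve_verbosity(verbose, using_parameter_server, interactive_logging):
    # Mirrors the branch shown above: 'auto' means epoch-level logging (2)
    # under ParameterServerStrategy or non-interactive (absl) logging,
    # and a batch-level progress bar (1) otherwise.
    if verbose == "auto":
        if using_parameter_server or not interactive_logging:
            return 2
        return 1
    return verbose

assert resolve_verbosity("auto", using_parameter_server=False, interactive_logging=True) == 1
assert resolve_verbosity("auto", using_parameter_server=True, interactive_logging=True) == 2
assert resolve_verbosity(0, using_parameter_server=False, interactive_logging=False) == 0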