diff --git a/python/paddle/io/dataloader/collate.py b/python/paddle/io/dataloader/collate.py index cf3d3be5e847f5..36e4126b6aed1b 100644 --- a/python/paddle/io/dataloader/collate.py +++ b/python/paddle/io/dataloader/collate.py @@ -70,7 +70,7 @@ def default_collate_fn(batch): sample_fields_num = len(sample) if not all(len(sample) == sample_fields_num for sample in iter(batch)): raise RuntimeError( - "fileds number not same among samples in a batch" + "fields number not same among samples in a batch" ) return [default_collate_fn(fields) for fields in zip(*batch)] @@ -88,8 +88,8 @@ def default_convert_fn(batch): dictionary, string, number, numpy array and paddle.Tensor. .. note:: - This function is default :attr:`collate_fn` in **Distable - automatic batching** mode, for **Distable automatic batching** + This function is default :attr:`collate_fn` in **Disable + automatic batching** mode, for **Disable automatic batching** - mode, please ses :attr:`paddle.io.DataLoader` + mode, please see :attr:`paddle.io.DataLoader` Args: diff --git a/python/paddle/io/dataloader/dataloader_iter.py b/python/paddle/io/dataloader/dataloader_iter.py index d8ba4a7685d307..4d55ed8f412fc4 100644 --- a/python/paddle/io/dataloader/dataloader_iter.py +++ b/python/paddle/io/dataloader/dataloader_iter.py @@ -164,7 +164,7 @@ def __init__(self, loader): self._drop_last, ) - # NOTE: _structrue_infos used to record the data structure of + # NOTE: _structure_infos used to record the data structure of # batch to restore batch structure after reading Tensor # from blocking_queue in single-process mode. Note that # only single process is used in single-process mode, we diff --git a/python/paddle/io/dataloader/dataset.py b/python/paddle/io/dataloader/dataset.py index f4fe81f76a8e87..3200269039a836 100755 --- a/python/paddle/io/dataloader/dataset.py +++ b/python/paddle/io/dataloader/dataset.py @@ -325,7 +325,7 @@ class ComposeDataset(Dataset): """ A Dataset which composes fields of multiple datasets. 
- This dataset is used for composing fileds of multiple map-style + This dataset is used for composing fields of multiple map-style datasets of same length. Args: diff --git a/python/paddle/io/dataloader/flat.py b/python/paddle/io/dataloader/flat.py index 36b899e3f55c29..23fd255977ce00 100644 --- a/python/paddle/io/dataloader/flat.py +++ b/python/paddle/io/dataloader/flat.py @@ -96,7 +96,7 @@ def _flatten(batch, flat_batch, structure, field_idx): def _restore_batch(flat_batch, structure): """ After reading list of Tensor data from lod_blocking_queue outputs, - use this function to restore the batch data structrue, replace + use this function to restore the batch data structure, replace :attr:`_paddle_field_x` with data from flat_batch """ diff --git a/python/paddle/io/dataloader/sampler.py b/python/paddle/io/dataloader/sampler.py index f6bb2e41b4b8f8..795402121ea9d8 100644 --- a/python/paddle/io/dataloader/sampler.py +++ b/python/paddle/io/dataloader/sampler.py @@ -288,7 +288,7 @@ def _weighted_sample(weights, num_samples, replacement=True): class WeightedRandomSampler(Sampler): """ - Random sample with given weights (probabilities), sampe index will be in range + Random sample with given weights (probabilities), sample index will be in range [0, len(weights) - 1], if :attr:`replacement` is True, index can be sampled multiple times. 
diff --git a/python/paddle/io/dataloader/worker.py b/python/paddle/io/dataloader/worker.py index 8829b6ee13d5c1..a7d49b3e172a15 100644 --- a/python/paddle/io/dataloader/worker.py +++ b/python/paddle/io/dataloader/worker.py @@ -85,7 +85,7 @@ def get_worker_info(): :attr:`num_workers`: total worker process number, see `paddle.io.DataLoader` - :attr:`id`: the worker processs id, count from 0 to :attr:`num_workers - 1` + :attr:`id`: the worker process id, count from 0 to :attr:`num_workers - 1` :attr:`dataset`: the dataset object in this worker process diff --git a/python/paddle/quantization/imperative/ptq_quantizer.py b/python/paddle/quantization/imperative/ptq_quantizer.py index 00891ffa9f4116..d009b486da8ed2 100644 --- a/python/paddle/quantization/imperative/ptq_quantizer.py +++ b/python/paddle/quantization/imperative/ptq_quantizer.py @@ -69,14 +69,14 @@ def combine_abs_max_and_hist( # bin_width = origin_max / (bins * upsample_bins) # = new_max / (bins * downsample_bins) bin_width = origin_max / (bins * upsample_bins) - downsampe_bins = int(math.ceil(new_max / (bins * bin_width))) - new_max = bins * bin_width * downsampe_bins + downsample_bins = int(math.ceil(new_max / (bins * bin_width))) + new_max = bins * bin_width * downsample_bins upsampled_hist = np.repeat(origin_hist, upsample_bins) - expanded_hist = np.zeros((bins * downsampe_bins), dtype=np.float32) + expanded_hist = np.zeros((bins * downsample_bins), dtype=np.float32) expanded_hist[0 : bins * upsample_bins] = upsampled_hist cumsumed_hist = np.cumsum(expanded_hist, dtype=np.float64)[ - downsampe_bins - 1 :: downsampe_bins + downsample_bins - 1 :: downsample_bins ] shift_cumsumed_hist = np.zeros((bins), dtype=np.float64) shift_cumsumed_hist[1:] = cumsumed_hist[0:-1]