6 changes: 3 additions & 3 deletions python/paddle/io/dataloader/collate.py
@@ -70,7 +70,7 @@ def default_collate_fn(batch):
         sample_fields_num = len(sample)
         if not all(len(sample) == sample_fields_num for sample in iter(batch)):
             raise RuntimeError(
-                "fileds number not same among samples in a batch"
+                "fields number not same among samples in a batch"
             )
         return [default_collate_fn(fields) for fields in zip(*batch)]

@@ -88,8 +88,8 @@ def default_convert_fn(batch):
     dictionary, string, number, numpy array and paddle.Tensor.

     .. note::
-        This function is default :attr:`collate_fn` in **Distable
-        automatic batching** mode, for **Distable automatic batching**
+        This function is default :attr:`collate_fn` in **Disable
+        automatic batching** mode, for **Disable automatic batching**
         mode, please see :attr:`paddle.io.DataLoader`

     Args:
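
For context on the corrected message: default_collate_fn collates a batch column-wise by zipping samples, so every sample must expose the same number of fields. A standalone sketch of that check in plain Python, mirroring the snippet above rather than importing Paddle internals:

import numpy as np

# Each sample is a tuple of fields, e.g. (image, label).
batch = [(np.zeros((3, 32, 32)), 0), (np.ones((3, 32, 32)), 1)]

sample_fields_num = len(batch[0])
if not all(len(sample) == sample_fields_num for sample in batch):
    raise RuntimeError("fields number not same among samples in a batch")

# Collate field-wise: zip(*batch) yields one tuple per field,
# here all images together, then all labels together.
images, labels = (list(field) for field in zip(*batch))
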
2 changes: 1 addition & 1 deletion python/paddle/io/dataloader/dataloader_iter.py
@@ -164,7 +164,7 @@ def __init__(self, loader):
             self._drop_last,
         )

-        # NOTE: _structrue_infos used to record the data structure of
+        # NOTE: _structure_infos used to record the data structure of
         # batch to restore batch structure after reading Tensor
         # from blocking_queue in single-process mode. Note that
         # only single process is used in single-process mode, we
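
The renamed _structure_infos bookkeeping exists because only flat lists of tensors travel through the blocking queue; the nested layout of each batch is recorded separately so it can be rebuilt on the way out. An illustrative picture of the two pieces (not the exact internals):

import numpy as np

img = np.zeros((2, 3, 32, 32), dtype="float32")
lbl = np.array([0, 1])

batch = {"image": img, "label": lbl}      # nested structure produced by collate
flat_batch = [img, lbl]                   # what actually enters the queue
# Placeholder names follow the `_paddle_field_x` pattern from flat.py below.
structure = {"image": "_paddle_field_0", "label": "_paddle_field_1"}
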
2 changes: 1 addition & 1 deletion python/paddle/io/dataloader/dataset.py
@@ -325,7 +325,7 @@ class ComposeDataset(Dataset):
     """
     A Dataset which composes fields of multiple datasets.

-    This dataset is used for composing fileds of multiple map-style
+    This dataset is used for composing fields of multiple map-style
     datasets of same length.

     Args:
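
For reference, the documented composition: same-length map-style datasets are indexed together and the fields of their samples are chained into a single sample. A small usage sketch (the toy dataset is illustrative, and the exact return shape may differ):

from paddle.io import Dataset, ComposeDataset

class Numbers(Dataset):
    def __init__(self, values):
        self.values = values

    def __getitem__(self, idx):
        return (self.values[idx],)   # one field per sample

    def __len__(self):
        return len(self.values)

composed = ComposeDataset([Numbers([1, 2, 3]), Numbers([10, 20, 30])])
print(composed[1])   # fields of both datasets at index 1, e.g. (2, 20)
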
2 changes: 1 addition & 1 deletion python/paddle/io/dataloader/flat.py
@@ -96,7 +96,7 @@ def _flatten(batch, flat_batch, structure, field_idx):
 def _restore_batch(flat_batch, structure):
     """
     After reading list of Tensor data from lod_blocking_queue outputs,
-    use this function to restore the batch data structrue, replace
+    use this function to restore the batch data structure, replace
     :attr:`_paddle_field_x` with data from flat_batch
     """

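
A minimal sketch of what the corrected docstring describes: walk the recorded structure and swap each `_paddle_field_x` placeholder for the matching entry of the flat list. This assumes the placeholder suffix x indexes into flat_batch; it mirrors the idea, not Paddle's exact code:

def restore_batch(flat_batch, structure):
    # A string placeholder names a slot in flat_batch.
    if isinstance(structure, str) and structure.startswith("_paddle_field_"):
        return flat_batch[int(structure[len("_paddle_field_"):])]
    # Containers are rebuilt with the same shape, restoring each member.
    if isinstance(structure, dict):
        return {k: restore_batch(flat_batch, v) for k, v in structure.items()}
    if isinstance(structure, (list, tuple)):
        return type(structure)(restore_batch(flat_batch, v) for v in structure)
    return structure

Applied to the illustration under dataloader_iter.py above, restore_batch([img, lbl], structure) rebuilds the original {"image": ..., "label": ...} batch.
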
2 changes: 1 addition & 1 deletion python/paddle/io/dataloader/sampler.py
@@ -288,7 +288,7 @@ def _weighted_sample(weights, num_samples, replacement=True):

 class WeightedRandomSampler(Sampler):
     """
-    Random sample with given weights (probabilities), sampe index will be in range
+    Random sample with given weights (probabilities), sample index will be in range
     [0, len(weights) - 1], if :attr:`replacement` is True, index can be sampled
     multiple times.

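
A quick usage example of the documented behaviour: sampled indices stay in [0, len(weights) - 1], and with replacement=True an index may repeat:

from paddle.io import WeightedRandomSampler

# Five draws over three indices with unnormalized weights.
sampler = WeightedRandomSampler(
    weights=[0.2, 0.3, 0.5], num_samples=5, replacement=True
)
print(list(sampler))  # e.g. [2, 1, 2, 0, 2]
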
2 changes: 1 addition & 1 deletion python/paddle/io/dataloader/worker.py
@@ -85,7 +85,7 @@ def get_worker_info():

     :attr:`num_workers`: total worker process number, see `paddle.io.DataLoader`

-    :attr:`id`: the worker processs id, count from 0 to :attr:`num_workers - 1`
+    :attr:`id`: the worker process id, count from 0 to :attr:`num_workers - 1`

     :attr:`dataset`: the dataset object in this worker process

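
The attributes listed above are typically used to shard an iterable dataset across workers by id; a sketch in the spirit of the get_worker_info docs (the dataset and range are illustrative):

from paddle.io import IterableDataset, get_worker_info

class RangeDataset(IterableDataset):
    def __init__(self, start, end):
        self.start, self.end = start, end

    def __iter__(self):
        info = get_worker_info()   # None when loading in the main process
        if info is None:
            start, end = self.start, self.end
        else:
            # Give worker `info.id` its own contiguous slice of the range.
            per_worker = (self.end - self.start + info.num_workers - 1) // info.num_workers
            start = self.start + info.id * per_worker
            end = min(start + per_worker, self.end)
        yield from range(start, end)
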
8 changes: 4 additions & 4 deletions python/paddle/quantization/imperative/ptq_quantizer.py
@@ -69,14 +69,14 @@ def combine_abs_max_and_hist(
     # bin_width = origin_max / (bins * upsample_bins)
     #           = new_max / (bins * downsample_bins)
     bin_width = origin_max / (bins * upsample_bins)
-    downsampe_bins = int(math.ceil(new_max / (bins * bin_width)))
-    new_max = bins * bin_width * downsampe_bins
+    downsample_bins = int(math.ceil(new_max / (bins * bin_width)))
+    new_max = bins * bin_width * downsample_bins

     upsampled_hist = np.repeat(origin_hist, upsample_bins)
-    expanded_hist = np.zeros((bins * downsampe_bins), dtype=np.float32)
+    expanded_hist = np.zeros((bins * downsample_bins), dtype=np.float32)
     expanded_hist[0 : bins * upsample_bins] = upsampled_hist
     cumsumed_hist = np.cumsum(expanded_hist, dtype=np.float64)[
-        downsampe_bins - 1 :: downsampe_bins
+        downsample_bins - 1 :: downsample_bins
     ]
     shift_cumsumed_hist = np.zeros((bins), dtype=np.float64)
     shift_cumsumed_hist[1:] = cumsumed_hist[0:-1]
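
To make the renamed variable's role concrete: the old histogram is upsampled so both ranges share one fine bin width, the fine bins are zero-padded out to the rounded-up new range, and every downsample_bins consecutive fine bins are merged back into one of the bins coarse bins via a strided cumulative sum. A worked toy example mirroring the visible steps (numbers are made up; the final subtraction recovering per-bin counts is inferred, since the diff cuts off there):

import math
import numpy as np

bins, upsample_bins = 4, 2
origin_max, new_max = 8.0, 10.0
origin_hist = np.array([4.0, 3.0, 2.0, 1.0], dtype=np.float32)

bin_width = origin_max / (bins * upsample_bins)                  # 8 / 8 = 1.0
downsample_bins = int(math.ceil(new_max / (bins * bin_width)))   # ceil(10/4) = 3
new_max = bins * bin_width * downsample_bins                     # 4 * 1 * 3 = 12.0

upsampled_hist = np.repeat(origin_hist, upsample_bins)           # 8 fine bins
expanded_hist = np.zeros(bins * downsample_bins, dtype=np.float32)  # 12 fine bins
expanded_hist[0 : bins * upsample_bins] = upsampled_hist
cumsumed_hist = np.cumsum(expanded_hist, dtype=np.float64)[
    downsample_bins - 1 :: downsample_bins
]                                                                # [11, 18, 20, 20]
shift_cumsumed_hist = np.zeros((bins), dtype=np.float64)
shift_cumsumed_hist[1:] = cumsumed_hist[0:-1]
new_hist = cumsumed_hist - shift_cumsumed_hist                   # [11, 7, 2, 0]
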