Merged
6 changes: 3 additions & 3 deletions scripts/speech_recognition/estimate_duration_bins_2d.py
@@ -264,7 +264,7 @@ def _estimate_token_buckets(max_bucket_duration, start_idx, end_idx, corr_subbuckets
     # Here, we'd end up making token subbuckets for: [5, 20, 40]
     # where the [20, 40] bucket will have 4 times more subbuckets (as we combined 4 buckets into 1) than a usual bucket in that setting.

-    for i, (duration_bin, bin_idx) in enumerate(zip(duration_bins, bin_indexes[1:])):
+    for i, (duration_bin, binidx) in enumerate(zip(duration_bins, bin_indexes[1:])):
         if (i != len(duration_bins) - 1 and duration_bins[i + 1] == duration_bin) or (
             i == len(duration_bins) - 1 and max_duration == duration_bin
         ):
@@ -276,7 +276,7 @@ def _estimate_token_buckets(max_bucket_duration, start_idx, end_idx, corr_subbuckets
             end_idx=binidx,
             corr_subbuckets=num_subbuckets * skipped_buckets,
         )
-        start_idx = bin_idx
+        start_idx = binidx
         skipped_buckets = 1

     # Estimate an extra 2D bin set for global max duration.
@@ -285,7 +285,7 @@ def _estimate_token_buckets(max_bucket_duration, start_idx, end_idx, corr_subbuckets
     _estimate_token_buckets(
         max_bucket_duration=max_duration,
         start_idx=start_idx,
-        end_idx=binidx,
+        end_idx=len(sizes),
         corr_subbuckets=num_subbuckets * skipped_buckets,
     )
     return bins, tps_thresholds