Commit 0886284

Fix torch.nonzero inconsistency and n-gram range bug in evaluation.py
Co-authored-by: Hananel-Hazan <[email protected]>
Parent: 61d641e

1 file changed: bindsnet/evaluation/evaluation.py (4 additions, 4 deletions)
@@ -195,13 +195,13 @@ def ngram(
 
     # Aggregate all of the firing neurons' indices
     fire_order = []
-    for t in range(activity.size()[0]):
-        ordering = torch.nonzero(activity[t].view(-1))
+    for t in range(activity.size(0)):
+        ordering = torch.nonzero(activity[t]).view(-1)
         if ordering.numel() > 0:
-            fire_order += ordering[:, 0].tolist()
+            fire_order += ordering.tolist()
 
     # Consider all n-gram sequences.
-    for j in range(len(fire_order) - n):
+    for j in range(len(fire_order) - n + 1):
         if tuple(fire_order[j : j + n]) in ngram_scores:
             score += ngram_scores[tuple(fire_order[j : j + n])]
 

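Both changes are easy to check in isolation. The sketch below is a minimal, standalone reproduction, not code from the repository: the toy `activity` tensor and the choice of `n` are invented for illustration. It shows that `torch.nonzero` on a 1-D tensor returns a column of shape (k, 1), so flattening its result with `.view(-1)` yields the firing indices directly, and that a sequence of length L contains L - n + 1 n-grams, which is why the loop bound needs the `+ 1`.

import torch

# Hypothetical spike activity: 3 time steps, 4 neurons.
activity = torch.tensor([[0, 1, 0, 1],
                         [1, 0, 0, 0],
                         [0, 0, 1, 0]])

fire_order = []
for t in range(activity.size(0)):
    # torch.nonzero on a 1-D tensor returns shape (k, 1);
    # .view(-1) flattens it to the k firing indices.
    ordering = torch.nonzero(activity[t]).view(-1)
    if ordering.numel() > 0:
        fire_order += ordering.tolist()

print(fire_order)  # [1, 3, 0, 2]

# A sequence of length L has L - n + 1 n-grams; without the "+ 1"
# the old loop bound skipped the final n-gram, here (0, 2).
n = 2
ngrams = [tuple(fire_order[j : j + n]) for j in range(len(fire_order) - n + 1)]
print(ngrams)  # [(1, 3), (3, 0), (0, 2)]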