Replace asserts with exceptions #5587
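This PR replaces `assert` statements used for input validation with explicit exceptions. As a reminder of why: `assert` statements are stripped entirely when Python runs with the `-O` flag, while a raised exception always fires and carries a catchable type and a proper message. A minimal sketch of the pattern applied throughout the diff, using a hypothetical helper that is not part of this PR:

```python
# Hypothetical helper illustrating the pattern applied throughout this PR.
# `assert` is skipped under `python -O`, so validation that must always run
# is written as an explicit check that raises an exception.

def check_same_length(sizes, aspect_ratios):
    # Before: assert len(sizes) == len(aspect_ratios)
    # After: an explicit exception that survives `python -O`
    if len(sizes) != len(aspect_ratios):
        raise RuntimeError("sizes and aspect_ratios should have the same length")


check_same_length([32, 64], [(0.5, 1.0), (1.0, 2.0)])  # passes silently
# check_same_length([32], [(0.5,), (1.0,)])            # raises RuntimeError
```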
```diff
@@ -144,16 +144,16 @@ def test_build_fx_feature_extractor(self, model_name):
             model, train_return_nodes=train_return_nodes, eval_return_nodes=eval_return_nodes
         )
         # Check must specify return nodes
-        with pytest.raises(AssertionError):
+        with pytest.raises(RuntimeError):
             self._create_feature_extractor(model)
         # Check return_nodes and train_return_nodes / eval_return nodes
         # mutual exclusivity
-        with pytest.raises(AssertionError):
+        with pytest.raises(RuntimeError):
             self._create_feature_extractor(
                 model, return_nodes=train_return_nodes, train_return_nodes=train_return_nodes
             )
         # Check train_return_nodes / eval_return nodes must both be specified
-        with pytest.raises(AssertionError):
+        with pytest.raises(RuntimeError):
             self._create_feature_extractor(model, train_return_nodes=train_return_nodes)
         # Check invalid node name raises ValueError
         with pytest.raises(ValueError):
```
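The feature-extraction tests now expect `RuntimeError` where the builder previously failed with a bare `AssertionError`. A standalone sketch of the same check against the public API, assuming a torchvision build with this PR applied (the `resnet18` model is an arbitrary choice; the test above is parametrized over model names):

```python
import pytest
import torchvision
from torchvision.models.feature_extraction import create_feature_extractor

model = torchvision.models.resnet18()

# Calling the extractor without specifying any return nodes is now reported
# as a RuntimeError instead of an AssertionError.
with pytest.raises(RuntimeError):
    create_feature_extractor(model)
```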
```diff
@@ -118,7 +118,8 @@ def __init__(
             print("Using legacy structure")
             self.split_folder = root
             self.split = "unknown"
-            assert not download, "Cannot download the videos using legacy_structure."
+            if download:
+                raise RuntimeError("Cannot download the videos using legacy_structure.")
         else:
             self.split_folder = path.join(root, split)
             self.split = verify_str_arg(split, arg="split", valid_values=["train", "val"])
```
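The `if download: raise RuntimeError(...)` form keeps the original error message but survives `python -O` and gives callers a concrete exception type to catch. A hypothetical stand-in for the video-dataset constructor above (class name and arguments are illustrative; only the assert-to-raise transformation comes from the diff):

```python
class LegacyVideoDataset:
    # Hypothetical class; only the assert -> raise change mirrors the diff above.
    def __init__(self, root: str, download: bool = False, legacy_structure: bool = True):
        if legacy_structure:
            self.split_folder = root
            self.split = "unknown"
            # Before: assert not download, "Cannot download the videos using legacy_structure."
            if download:
                raise RuntimeError("Cannot download the videos using legacy_structure.")


try:
    LegacyVideoDataset("videos/", download=True)
except RuntimeError as exc:
    print(exc)  # Cannot download the videos using legacy_structure.
```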
```diff
@@ -45,8 +45,6 @@ def __init__(
         if not isinstance(aspect_ratios[0], (list, tuple)):
             aspect_ratios = (aspect_ratios,) * len(sizes)

-        assert len(sizes) == len(aspect_ratios)
-
         self.sizes = sizes
         self.aspect_ratios = aspect_ratios
         self.cell_anchors = [
```
```diff
@@ -86,32 +84,34 @@ def num_anchors_per_location(self):
     def grid_anchors(self, grid_sizes: List[List[int]], strides: List[List[Tensor]]) -> List[Tensor]:
         anchors = []
         cell_anchors = self.cell_anchors
-        assert cell_anchors is not None
-
-        if not (len(grid_sizes) == len(strides) == len(cell_anchors)):
-            raise ValueError(
-                "Anchors should be Tuple[Tuple[int]] because each feature "
-                "map could potentially have different sizes and aspect ratios. "
-                "There needs to be a match between the number of "
-                "feature maps passed and the number of sizes / aspect ratios specified."
-            )
-
-        for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
-            grid_height, grid_width = size
-            stride_height, stride_width = stride
-            device = base_anchors.device
-
-            # For output anchor, compute [x_center, y_center, x_center, y_center]
-            shifts_x = torch.arange(0, grid_width, dtype=torch.int32, device=device) * stride_width
-            shifts_y = torch.arange(0, grid_height, dtype=torch.int32, device=device) * stride_height
-            shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
-            shift_x = shift_x.reshape(-1)
-            shift_y = shift_y.reshape(-1)
-            shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
-
-            # For every (base anchor, output anchor) pair,
-            # offset each zero-centered base anchor by the center of the output anchor.
-            anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))
+        if cell_anchors is None:
+            raise RuntimeError("cell_anchors should not be None")
+        else:
+            if not (len(grid_sizes) == len(strides) == len(cell_anchors)):
+                raise ValueError(
+                    "Anchors should be Tuple[Tuple[int]] because each feature "
+                    "map could potentially have different sizes and aspect ratios. "
+                    "There needs to be a match between the number of "
+                    "feature maps passed and the number of sizes / aspect ratios specified."
+                )
+
+            for size, stride, base_anchors in zip(grid_sizes, strides, cell_anchors):
+                grid_height, grid_width = size
+                stride_height, stride_width = stride
+                device = base_anchors.device
+
+                # For output anchor, compute [x_center, y_center, x_center, y_center]
+                shifts_x = torch.arange(0, grid_width, dtype=torch.int32, device=device) * stride_width
+                shifts_y = torch.arange(0, grid_height, dtype=torch.int32, device=device) * stride_height
+                shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
+                shift_x = shift_x.reshape(-1)
+                shift_y = shift_y.reshape(-1)
+                shifts = torch.stack((shift_x, shift_y, shift_x, shift_y), dim=1)
+
+                # For every (base anchor, output anchor) pair,
+                # offset each zero-centered base anchor by the center of the output anchor.
+                anchors.append((shifts.view(-1, 1, 4) + base_anchors.view(1, -1, 4)).reshape(-1, 4))

         return anchors
```
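For reference, the loop body that is kept (and re-indented under the `else` branch) builds a grid of `[x, y, x, y]` offsets which are then added to every zero-centered base anchor. A small standalone illustration of that shift computation, with an arbitrary 2x3 feature map and stride 4:

```python
import torch

# Stand-alone illustration of the shift grid built inside grid_anchors:
# a 2 (rows) x 3 (cols) feature map with stride 4 yields one [x, y, x, y]
# offset per cell, later added to every zero-centered base anchor.
grid_height, grid_width = 2, 3
stride_height, stride_width = 4, 4

shifts_x = torch.arange(0, grid_width, dtype=torch.int32) * stride_width
shifts_y = torch.arange(0, grid_height, dtype=torch.int32) * stride_height
shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x, indexing="ij")
shifts = torch.stack(
    (shift_x.reshape(-1), shift_y.reshape(-1), shift_x.reshape(-1), shift_y.reshape(-1)), dim=1
)
print(shifts)
# tensor([[0, 0, 0, 0],
#         [4, 0, 4, 0],
#         [8, 0, 8, 0],
#         [0, 4, 0, 4],
#         [4, 4, 4, 4],
#         [8, 4, 8, 4]], dtype=torch.int32)
```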
```diff
@@ -164,8 +164,8 @@ def __init__(
         clip: bool = True,
     ):
         super().__init__()
-        if steps is not None:
-            assert len(aspect_ratios) == len(steps)
+        if steps is not None and len(aspect_ratios) != len(steps):
+            raise RuntimeError("aspect_ratios and steps should have the same length")
         self.aspect_ratios = aspect_ratios
         self.steps = steps
         self.clip = clip
```
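With this change, passing mismatched `aspect_ratios` and `steps` to `DefaultBoxGenerator` surfaces as a `RuntimeError` with a message instead of a bare `AssertionError`. A quick check exercising the new branch, assuming a torchvision build with this PR applied and the `torchvision.models.detection.anchor_utils` module path (the concrete values are arbitrary):

```python
import pytest
from torchvision.models.detection.anchor_utils import DefaultBoxGenerator

# Two sets of aspect ratios but only one step: the lengths differ, so the
# constructor now raises RuntimeError rather than failing an assert.
with pytest.raises(RuntimeError):
    DefaultBoxGenerator(aspect_ratios=[[2], [2, 3]], steps=[8])
```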