 import timeout_decorator  # noqa

 from transformers import OPTConfig, is_torch_available
-from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
+from transformers.testing_utils import require_torch, slow, torch_device

 from ...generation.test_generation_utils import GenerationTesterMixin
 from ...test_configuration_common import ConfigTester
@@ -266,25 +266,21 @@ def _long_tensor(tok_lst):


 @require_torch
-@require_sentencepiece
-@require_tokenizers
 class OPTModelIntegrationTests(unittest.TestCase):
     @slow
     def test_inference_no_head(self):
         model = OPTModel.from_pretrained("facebook/opt-350m").to(torch_device)
         input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
-        attention_mask = input_ids.ne(model.config.pad_token_id)
         with torch.no_grad():
-            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
+            output = model(input_ids=input_ids).last_hidden_state
         expected_shape = torch.Size((1, 11, 512))
         self.assertEqual(output.shape, expected_shape)
         expected_slice = torch.tensor(
-            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]], device=torch_device
+            [[-0.2867, -1.9256, -0.3062], [-1.2711, -0.1337, -0.1897], [0.4109, 0.1187, -1.3142]], device=torch_device
         )
         self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-3))


-@require_tokenizers
 @require_torch
 @slow
 class OPTEmbeddingsTest(unittest.TestCase):
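For reference, a minimal sketch (not part of the diff) of what the simplified integration test now exercises: a forward pass of facebook/opt-350m without an explicit attention_mask, checking the last_hidden_state shape asserted above. It assumes torch and transformers are installed and the checkpoint can be downloaded.

# Minimal sketch of the simplified forward pass from the updated test.
# Assumes the facebook/opt-350m checkpoint is reachable for download.
import torch
from transformers import OPTModel

model = OPTModel.from_pretrained("facebook/opt-350m")

# Same token ids as the test; no attention_mask is passed, so the model
# attends to every position as a real (unpadded) token.
input_ids = torch.tensor(
    [[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=torch.long
)

with torch.no_grad():
    last_hidden_state = model(input_ids=input_ids).last_hidden_state

print(last_hidden_state.shape)  # torch.Size([1, 11, 512]), matching expected_shape in the test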