|
257 | 257 | "models.layoutxlm": ["LayoutXLMProcessor"], |
258 | 258 | "models.led": ["LED_PRETRAINED_CONFIG_ARCHIVE_MAP", "LEDConfig", "LEDTokenizer"], |
259 | 259 | "models.levit": ["LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LevitConfig"], |
| 260 | + "models.llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"], |
260 | 261 | "models.longformer": ["LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongformerConfig", "LongformerTokenizer"], |
261 | 262 | "models.longt5": ["LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP", "LongT5Config"], |
262 | 263 | "models.luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig", "LukeTokenizer"], |
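This hunk registers `LlamaConfig` under `models.llama` in `_import_structure`, the mapping that `transformers/__init__.py` hands to its lazy module so that `from transformers import LlamaConfig` only imports the submodule on first attribute access. A minimal sketch of that pattern, with an illustrative `DemoLazyModule` standing in for the library's actual `_LazyModule`:

```python
# Minimal sketch of the lazy-import pattern behind _import_structure.
# DemoLazyModule is illustrative; it is not transformers' real _LazyModule.
import importlib
from types import ModuleType


class DemoLazyModule(ModuleType):
    """Resolves top-level names from their submodules on first access."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # Invert {"models.llama": ["LlamaConfig", ...]} into
        # {"LlamaConfig": "models.llama", ...} for O(1) lookup.
        self._class_to_module = {
            cls: mod for mod, classes in import_structure.items() for cls in classes
        }

    def __getattr__(self, attr):
        if attr not in self._class_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f".{self._class_to_module[attr]}", self.__name__)
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache, so later accesses bypass __getattr__
        return value
```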
|
528 | 529 | _import_structure["models.deberta_v2"].append("DebertaV2Tokenizer") |
529 | 530 | _import_structure["models.fnet"].append("FNetTokenizer") |
530 | 531 | _import_structure["models.layoutxlm"].append("LayoutXLMTokenizer") |
| 532 | + _import_structure["models.llama"].append("LlamaTokenizer") |
531 | 533 | _import_structure["models.m2m_100"].append("M2M100Tokenizer") |
532 | 534 | _import_structure["models.marian"].append("MarianTokenizer") |
533 | 535 | _import_structure["models.mbart"].append("MBartTokenizer") |
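`LlamaTokenizer` is appended here inside the block that is only reached when sentencepiece is installed; when the dependency is missing, transformers registers dummy placeholder objects that raise an informative `ImportError` on use. A hedged sketch of how that gate looks from user code (`is_sentencepiece_available` is a real transformers utility; the fallback message is illustrative):

```python
# Sketch of the sentencepiece gate this hunk sits behind. The slow
# LlamaTokenizer is backed by a sentencepiece model, so it is only
# registered when the dependency is importable.
from transformers.utils import is_sentencepiece_available

if is_sentencepiece_available():
    from transformers import LlamaTokenizer  # real, sentencepiece-backed class
else:
    # Without sentencepiece, the name resolves to a dummy object that raises
    # an ImportError explaining the missing dependency when instantiated.
    print("pip install sentencepiece to enable LlamaTokenizer")
```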
|
1468 | 1470 | "LevitPreTrainedModel", |
1469 | 1471 | ] |
1470 | 1472 | ) |
| 1473 | + _import_structure["models.llama"].extend( |
| 1474 | + [ |
| 1475 | + "LlamaForCausalLM", |
| 1476 | + "LlamaModel", |
| 1477 | + "LlamaPreTrainedModel", |
| 1478 | + ] |
| 1479 | + ) |
1471 | 1480 | _import_structure["models.longformer"].extend( |
1472 | 1481 | [ |
1473 | 1482 | "LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST", |
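The three classes exported here follow the usual transformers triad: a bare `LlamaModel`, a `LlamaForCausalLM` head on top of it, and the shared `LlamaPreTrainedModel` base. A quick smoke test of the new exports once this lands; the config sizes below are tiny placeholders for illustration, not LLaMA's released hyperparameters:

```python
# Hedged smoke test for the newly exported classes; this builds a deliberately
# tiny random-weight model, not a released LLaMA checkpoint.
import torch
from transformers import LlamaConfig, LlamaForCausalLM

config = LlamaConfig(
    vocab_size=1000, hidden_size=128, intermediate_size=344,
    num_hidden_layers=2, num_attention_heads=4,
)
model = LlamaForCausalLM(config)  # randomly initialized, nothing downloaded

input_ids = torch.randint(0, config.vocab_size, (1, 8))
with torch.no_grad():
    out = model(input_ids)
print(out.logits.shape)  # torch.Size([1, 8, 1000])
```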
|
3260 | 3269 | from .models.layoutxlm import LayoutXLMProcessor |
3261 | 3270 | from .models.led import LED_PRETRAINED_CONFIG_ARCHIVE_MAP, LEDConfig, LEDTokenizer |
3262 | 3271 | from .models.levit import LEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, LevitConfig |
| 3272 | + from .models.llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig |
3263 | 3273 | from .models.longformer import LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, LongformerConfig, LongformerTokenizer |
3264 | 3274 | from .models.longt5 import LONGT5_PRETRAINED_CONFIG_ARCHIVE_MAP, LongT5Config |
3265 | 3275 | from .models.luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig, LukeTokenizer |
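This is the `TYPE_CHECKING` mirror of the `_import_structure` entry added above: the same names are written out as ordinary imports so static type checkers and IDEs can resolve them, while at runtime the lazy module serves them instead. A toy illustration of that dual structure (not transformers' actual init):

```python
# Toy illustration of the dual structure this hunk maintains: each name added
# to _import_structure must reappear as a real import under TYPE_CHECKING.
from typing import TYPE_CHECKING

_import_structure = {
    "models.llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}

if TYPE_CHECKING:
    # Static analyzers read this branch; it never executes at runtime.
    from .models.llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
else:
    # At runtime, transformers swaps the module for a lazy proxy built from
    # _import_structure, so nothing heavy is imported eagerly.
    pass
```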
|
3504 | 3514 | from .models.deberta_v2 import DebertaV2Tokenizer |
3505 | 3515 | from .models.fnet import FNetTokenizer |
3506 | 3516 | from .models.layoutxlm import LayoutXLMTokenizer |
| 3517 | + from .models.llama import LlamaTokenizer |
3507 | 3518 | from .models.m2m_100 import M2M100Tokenizer |
3508 | 3519 | from .models.marian import MarianTokenizer |
3509 | 3520 | from .models.mbart import MBart50Tokenizer, MBartTokenizer |
|
3747 | 3758 | from .generation_utils import top_k_top_p_filtering |
3748 | 3759 | from .modeling_utils import PreTrainedModel |
3749 | 3760 |
|
3750 | | - # PyTorch model imports |
3751 | 3761 | from .models.albert import ( |
3752 | 3762 | ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST, |
3753 | 3763 | AlbertForMaskedLM, |
|
4256 | 4266 | LevitModel, |
4257 | 4267 | LevitPreTrainedModel, |
4258 | 4268 | ) |
 | 4269 | + from .models.lilt import (
 | 4270 | + LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
 | 4271 | + LiltForQuestionAnswering,
 | 4272 | + LiltForSequenceClassification,
 | 4273 | + LiltForTokenClassification,
 | 4274 | + LiltModel,
 | 4275 | + LiltPreTrainedModel,
 | 4276 | + )
 | 4277 | + from .models.llama import (
 | 4278 | + LlamaForCausalLM,
 | 4279 | + LlamaModel,
 | 4280 | + LlamaPreTrainedModel,
 | 4281 | + )
4259 | 4282 | from .models.longformer import (
4260 | 4283 | LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
4261 | 4284 | LongformerForMaskedLM,
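Both halves of `__init__.py` have to stay in sync, and the repository enforces this in its consistency checks (`utils/check_inits.py`, run as part of `make repo-consistency`, if memory serves). The core of that invariant, reduced to a sketch with toy stand-ins for the parsed init:

```python
# Reduced sketch of the invariant utils/check_inits.py enforces: every name
# registered in _import_structure appears in the TYPE_CHECKING imports and
# vice versa. The two sets below are toy stand-ins for the parsed file.
registered = {
    "LlamaConfig", "LlamaTokenizer",
    "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel",
}
type_checked = {
    "LlamaConfig", "LlamaTokenizer",
    "LlamaForCausalLM", "LlamaModel", "LlamaPreTrainedModel",
}

out_of_sync = registered ^ type_checked  # names present in only one half
if out_of_sync:
    raise SystemExit(f"__init__.py halves disagree on: {sorted(out_of_sync)}")
print("init halves in sync")
```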
|