🚨🚨🚨 [Refactor] Move third-party related utility files into integrations/ folder 🚨🚨🚨
#25599
Changes from 7 commits
`src/transformers/__init__.py`

```diff
@@ -111,8 +111,10 @@
         "is_tensorboard_available",
         "is_wandb_available",
     ],
-    "lib_integrations": [],
```
Collaborator: Missing an `integrations` key?

Contributor (Author): That key is already defined above: https://github.com/younesbelkada/transformers/blob/move-integrations/src/transformers/__init__.py#L108
```diff
-    "lib_integrations.peft": [],
+    "integrations.bitsandbytes": [],
+    "integrations.deepspeed": [],
+    "integrations.integration_utils": [],
+    "integrations.peft": [],
     "modelcard": ["ModelCard"],
     "modeling_tf_pytorch_utils": [
         "convert_tf_weight_name_to_pt_weight_name",
```
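For context, the `_import_structure` dict at the top of `src/transformers/__init__.py` feeds transformers' lazy-import machinery: symbols are only resolved when first accessed. A trimmed sketch of that pattern (the keys and symbols below are abbreviated for illustration, not the full mapping):

```python
# Trimmed sketch of the lazy-import pattern in src/transformers/__init__.py.
from typing import TYPE_CHECKING

_import_structure = {
    # A key maps a submodule to the names it exposes at the top level.
    "integrations": [
        "is_tensorboard_available",
        "is_wandb_available",
    ],
    # A key with an empty list still registers the submodule, so
    # `transformers.integrations.deepspeed` resolves lazily on first access.
    "integrations.deepspeed": [],
}

if TYPE_CHECKING:
    from .integrations import is_tensorboard_available, is_wandb_available
else:
    import sys

    from .utils import _LazyModule

    # _LazyModule replaces this module with a proxy that imports
    # submodules and attributes on demand.
    sys.modules[__name__] = _LazyModule(
        __name__, globals()["__file__"], _import_structure, module_spec=__spec__
    )
```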
```diff
@@ -733,7 +735,6 @@
         "is_vision_available",
         "logging",
     ],
-    "utils.bitsandbytes": [],
     "utils.quantization_config": ["BitsAndBytesConfig", "GPTQConfig"],
 }
```
```diff
@@ -989,7 +990,6 @@
         "TextDataset",
         "TextDatasetForNextSentencePrediction",
     ]
-    _import_structure["deepspeed"] = []
     _import_structure["generation"].extend(
         [
             "BeamScorer",
```
`src/transformers/integrations/__init__.py` (new file)

```diff
@@ -0,0 +1,54 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .bitsandbytes import bitsandbytes
+from .deepspeed import deepspeed
+from .integration_utils import (
+    INTEGRATION_TO_CALLBACK,
+    AzureMLCallback,
+    ClearMLCallback,
+    CodeCarbonCallback,
+    CometCallback,
+    DagsHubCallback,
+    FlyteCallback,
+    MLflowCallback,
+    NeptuneCallback,
+    NeptuneMissingConfiguration,
+    TensorBoardCallback,
+    WandbCallback,
+    get_available_reporting_integrations,
+    get_reporting_integration_callbacks,
+    hp_params,
+    is_azureml_available,
+    is_clearml_available,
+    is_codecarbon_available,
+    is_comet_available,
+    is_dagshub_available,
+    is_fairscale_available,
+    is_flyte_deck_standard_available,
+    is_flytekit_available,
+    is_mlflow_available,
+    is_neptune_available,
+    is_optuna_available,
+    is_ray_available,
+    is_ray_tune_available,
+    is_sigopt_available,
+    is_tensorboard_available,
+    is_wandb_available,
+    rewrite_logs,
+    run_hp_search_optuna,
+    run_hp_search_ray,
+    run_hp_search_sigopt,
+    run_hp_search_wandb,
+)
+from .peft import PeftAdapterMixin
```

Collaborator (on the `integration_utils` import): For an additional follow-up, it would be great to split all of those into their respective modules (one for wandb, one for cometml, etc.).

Contributor (Author): Agreed!
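A hypothetical sketch of that follow-up: a thin `integrations/wandb.py` re-exporting only the wandb-related pieces of `integration_utils` (the module name and exact symbol set are assumptions here, not part of this PR):

```python
# Hypothetical integrations/wandb.py: narrow re-exports so callers could do
# `from transformers.integrations.wandb import WandbCallback`.
from .integration_utils import (  # noqa: F401
    WandbCallback,
    is_wandb_available,
    run_hp_search_wandb,
)

__all__ = ["WandbCallback", "is_wandb_available", "run_hp_search_wandb"]
```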
New module re-exporting the DeepSpeed helpers:

```diff
@@ -0,0 +1,25 @@
+# Copyright 2023 The HuggingFace Team. All rights reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from .deepspeed import (
+    HfDeepSpeedConfig,
+    HfTrainerDeepSpeedConfig,
+    deepspeed_config,
+    deepspeed_init,
+    deepspeed_load_checkpoint,
+    deepspeed_optim_sched,
+    is_deepspeed_available,
+    is_deepspeed_zero3_enabled,
+    set_hf_deepspeed_config,
+    unset_hf_deepspeed_config,
+)
```
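Moves like this are typically paired with a thin shim at the old import path. A minimal sketch, assuming `transformers.deepspeed` is kept as a deprecated alias (the shim path and warning text are illustrative, not taken from this diff):

```python
# Hypothetical shim at the old location (e.g. src/transformers/deepspeed.py):
# re-export from the new home and steer users toward it.
import warnings

from .integrations.deepspeed import (  # noqa: F401
    HfDeepSpeedConfig,
    deepspeed_config,
    is_deepspeed_available,
    is_deepspeed_zero3_enabled,
)

warnings.warn(
    "transformers.deepspeed is deprecated; import from "
    "transformers.integrations.deepspeed instead.",
    FutureWarning,
)
```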
`src/transformers/modeling_utils.py`

```diff
@@ -35,10 +35,10 @@
 from .activations import get_activation
 from .configuration_utils import PretrainedConfig
-from .deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
 from .dynamic_module_utils import custom_object_save
 from .generation import GenerationConfig, GenerationMixin
-from .lib_integrations import PeftAdapterMixin
+from .integrations import PeftAdapterMixin
+from .integrations.deepspeed import deepspeed_config, is_deepspeed_zero3_enabled
 from .pytorch_utils import (  # noqa: F401
     Conv1D,
     apply_chunking_to_forward,
```
```diff
@@ -660,7 +660,7 @@ def _load_state_dict_into_meta_model(
     # they won't get loaded.

     if is_quantized:
-        from .utils.bitsandbytes import set_module_quantized_tensor_to_device
+        from .integrations.bitsandbytes import set_module_quantized_tensor_to_device

     error_msgs = []
```
```diff
@@ -2937,7 +2937,7 @@ def from_pretrained(
             keep_in_fp32_modules = []

         if load_in_8bit or load_in_4bit:
-            from .utils.bitsandbytes import get_keys_to_not_convert, replace_with_bnb_linear
+            from .integrations.bitsandbytes import get_keys_to_not_convert, replace_with_bnb_linear

             llm_int8_skip_modules = quantization_config.llm_int8_skip_modules
             load_in_8bit_fp32_cpu_offload = quantization_config.llm_int8_enable_fp32_cpu_offload
```
```diff
@@ -3255,7 +3255,7 @@ def _load_pretrained_model(
         ):
             is_safetensors = False
         if is_quantized:
-            from .utils.bitsandbytes import set_module_quantized_tensor_to_device
+            from .integrations.bitsandbytes import set_module_quantized_tensor_to_device

         if device_map is not None and "disk" in device_map.values():
             archive_file = (
```
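Note that the bitsandbytes import stays local to the quantized code path, so a plain `import transformers` never requires bitsandbytes to be installed. A minimal sketch of that deferred-import pattern (the wrapper function is illustrative, and the keyword usage of `set_module_quantized_tensor_to_device` is an assumption based on this era of the library):

```python
import torch


def load_quantized_weights(model: torch.nn.Module, state_dict: dict) -> None:
    # Deferred import: only runs when a quantized checkpoint is loaded, and it
    # is the only line that had to change when the helper module moved.
    from transformers.integrations.bitsandbytes import (
        set_module_quantized_tensor_to_device,
    )

    for name, value in state_dict.items():
        set_module_quantized_tensor_to_device(model, name, "cpu", value=value)
```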