From fea831520a1d2be939722533deb5b02afb804a27 Mon Sep 17 00:00:00 2001 From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com> Date: Sun, 4 Feb 2024 09:57:47 -0600 Subject: [PATCH 1/6] Change shared.py --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index e4bdacaa1e..6867791b02 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -129,6 +129,7 @@ group.add_argument('--logits_all', action='store_true', help='Needs to be set for perplexity evaluation to work. Otherwise, ignore it, as it makes prompt processing slower.') group.add_argument('--no_offload_kqv', action='store_true', help='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.') group.add_argument('--cache-capacity', type=str, help='Maximum cache capacity (llama-cpp-python). Examples: 2000MiB, 2GiB. When provided without units, bytes will be assumed.') +group.add_argument('--row_split', action='store_true', help='Split multi-gpu by row instead of layer. Faster on some cards.') # ExLlama group = parser.add_argument_group('ExLlama') From 572ab0525367309e19552fc5758ddf58c6766e15 Mon Sep 17 00:00:00 2001 From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com> Date: Sun, 4 Feb 2024 09:59:06 -0600 Subject: [PATCH 2/6] Update llamacpp_hf.py --- modules/llamacpp_hf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/llamacpp_hf.py b/modules/llamacpp_hf.py index d491c463c8..4726669b37 100644 --- a/modules/llamacpp_hf.py +++ b/modules/llamacpp_hf.py @@ -216,7 +216,8 @@ def from_pretrained(cls, pretrained_model_name_or_path: Optional[Union[str, os.P 'tensor_split': tensor_split_list, 'rope_freq_scale': 1.0 / shared.args.compress_pos_emb, 'logits_all': shared.args.logits_all, - 'offload_kqv': not shared.args.no_offload_kqv + 'offload_kqv': not shared.args.no_offload_kqv, + 'split_mode': 1 if not shared.args.row_split else 2 } Llama = llama_cpp_lib().Llama From b8c17a523aed5a4b33f30324f102eff395e6e87c Mon Sep 17 00:00:00 2001 From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com> Date: Sun, 4 Feb 2024 09:59:36 -0600 Subject: [PATCH 3/6] Update llamacpp_model.py --- modules/llamacpp_model.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/llamacpp_model.py b/modules/llamacpp_model.py index 96ea98e901..7c405a4bae 100644 --- a/modules/llamacpp_model.py +++ b/modules/llamacpp_model.py @@ -95,7 +95,8 @@ def from_pretrained(self, path): 'rope_freq_base': RoPE.get_rope_freq_base(shared.args.alpha_value, shared.args.rope_freq_base), 'tensor_split': tensor_split_list, 'rope_freq_scale': 1.0 / shared.args.compress_pos_emb, - 'offload_kqv': not shared.args.no_offload_kqv + 'offload_kqv': not shared.args.no_offload_kqv, + 'split_mode': 1 if not shared.args.row_split else 2 } result.model = Llama(**params) From f35b7233bf48c89f7714449b72318179f1431b28 Mon Sep 17 00:00:00 2001 From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com> Date: Sun, 4 Feb 2024 10:00:23 -0600 Subject:
[PATCH 4/6] Update loaders.py --- modules/loaders.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/modules/loaders.py b/modules/loaders.py index 2976a85133..d9767769d8 100644 --- a/modules/loaders.py +++ b/modules/loaders.py @@ -44,6 +44,7 @@ 'cpu', 'numa', 'no_offload_kqv', + 'row_split', 'tensorcores', ], 'llamacpp_HF': [ @@ -66,6 +67,7 @@ 'no_use_fast', 'logits_all', 'no_offload_kqv', + 'row_split', 'tensorcores', 'llamacpp_HF_info', ], From 2b21b867ef3cb1659c11f9adbc2b96f18afd21ca Mon Sep 17 00:00:00 2001 From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com> Date: Sun, 4 Feb 2024 10:00:58 -0600 Subject: [PATCH 5/6] Update ui.py --- modules/ui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui.py b/modules/ui.py index b639c4dfdd..cd1193c936 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -93,6 +93,7 @@ def list_model_elements(): 'numa', 'logits_all', 'no_offload_kqv', + 'row_split', 'tensorcores', 'hqq_backend', ] From c86f44719532c2a7a3d8bf81f8e73fa75dc4938a Mon Sep 17 00:00:00 2001 From: Forkoz <59298527+Ph0rk0z@users.noreply.github.com> Date: Sun, 4 Feb 2024 10:03:07 -0600 Subject: [PATCH 6/6] Update ui_model_menu.py --- modules/ui_model_menu.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui_model_menu.py b/modules/ui_model_menu.py index 441092a394..e81f6b60ca 100644 --- a/modules/ui_model_menu.py +++ b/modules/ui_model_menu.py @@ -107,6 +107,7 @@ def create_ui(): with gr.Column(): shared.gradio['tensorcores'] = gr.Checkbox(label="tensorcores", value=shared.args.tensorcores, info='Use llama-cpp-python compiled with tensor cores support. This increases performance on RTX cards. NVIDIA only.') shared.gradio['no_offload_kqv'] = gr.Checkbox(label="no_offload_kqv", value=shared.args.no_offload_kqv, info='Do not offload the K, Q, V to the GPU. This saves VRAM but reduces the performance.') + shared.gradio['row_split'] = gr.Checkbox(label="row_split", value=shared.args.row_split, info='Split model by rows across GPUs. Improves performance on some cards.') shared.gradio['triton'] = gr.Checkbox(label="triton", value=shared.args.triton) shared.gradio['no_inject_fused_attention'] = gr.Checkbox(label="no_inject_fused_attention", value=shared.args.no_inject_fused_attention, info='Disable fused attention. Fused attention improves inference performance but uses more VRAM. Fuses layers for AutoAWQ. Disable if running low on VRAM.') shared.gradio['no_inject_fused_mlp'] = gr.Checkbox(label="no_inject_fused_mlp", value=shared.args.no_inject_fused_mlp, info='Affects Triton only. Disable fused MLP. Fused MLP improves performance but uses more VRAM. Disable if running low on VRAM.')