7 changes: 7 additions & 0 deletions common/common.cpp
@@ -1442,6 +1442,10 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.merge_qkv = true;
return true;
}
if (arg == "-muge" || arg == "--merge-up-gate-expsrts") {
params.merge_up_gate_exps = true;
return true;
}
if (arg == "-khad" || arg == "--k-cache-hadamard") {
params.k_cache_hadamard = true;
return true;
@@ -2148,6 +2152,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
options.push_back({ "*", "-no-gr, --no-graph-reuse", "disable graph reuse (default: %s)", !params.graph_reuse ? "enabled" : "disabled" });
options.push_back({ "*", "-ser, --smart-expert-reduction", "experts reduction (default: %d,%g)", params.min_experts, params.thresh_experts});
options.push_back({ "*", "-mqkv, --merge-qkv,", "merge Q,K,V (default: %d)", params.merge_qkv});
options.push_back({ "*", "-muge, --merge-up-gate-experts,","merge ffn_up/gate_exps (default: %d)", params.merge_up_gate_exps});
options.push_back({ "*", "-khad, --k-cache-hadamard,", "Use Hadamard transform for K-cache (default: %d)", params.k_cache_hadamard});
options.push_back({ "*", "-smf16, --split-mode-f16,", "Use f16 for data exchange between GPUs (default: %d)", params.split_mode_f16});
options.push_back({ "*", "-smf32, --split-mode-f32,", "Use f32 for data exchange between GPUs (default: %d)", !params.split_mode_f16});
@@ -3088,6 +3093,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
mparams.use_thp = params.use_thp;
mparams.validate_quants = params.validate_quants;
mparams.merge_qkv = params.merge_qkv;
mparams.merge_up_gate_exps = params.merge_up_gate_exps;
if (params.kv_overrides.empty()) {
mparams.kv_overrides = NULL;
} else {
@@ -4134,6 +4140,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
fprintf(stream, "use_thp: %s # default: false\n", params.use_thp ? "true" : "false");
fprintf(stream, "validate_quants: %s # default: false\n", params.validate_quants ? "true" : "false");
fprintf(stream, "merge_qkv: %s # default: false\n", params.merge_qkv ? "true" : "false");
fprintf(stream, "merge_up_gate_exps: %s # default: false\n", params.merge_up_gate_exps ? "true" : "false");
fprintf(stream, "max_extra_alloc: %d # default: 256\n", params.max_extra_alloc_MiB);
fprintf(stream, "penalize_nl: %s # default: false\n", sparams.penalize_nl ? "true" : "false");
fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
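Note on the plumbing above: in common.cpp the new `-muge` option is a bare boolean flag (its presence sets `merge_up_gate_exps = true`), whereas the llama-bench change further down parses an explicit `<0|1>` value after the flag. A minimal standalone sketch of the two conventions, with hypothetical names rather than the project's actual types:

```cpp
#include <cstdlib>
#include <string>

// common.cpp style: the flag takes no value; its presence enables the feature.
static bool parse_common_style(const std::string & arg, bool & merge_up_gate_exps) {
    if (arg == "-muge" || arg == "--merge-up-gate-experts") {
        merge_up_gate_exps = true;
        return true;
    }
    return false;
}

// llama-bench style: the flag consumes the next argument as <0|1>.
static bool parse_bench_style(int & i, int argc, char ** argv, bool & muge) {
    const std::string arg = argv[i];
    if (arg == "-muge" || arg == "--merge-up-gate-experts") {
        if (++i >= argc) {
            return false; // missing <0|1> value
        }
        muge = std::atoi(argv[i]) != 0;
        return true;
    }
    return false;
}
```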
1 change: 1 addition & 0 deletions common/common.h
@@ -287,6 +287,7 @@ struct gpt_params {
bool validate_quants = false; // if true, check for NaNs while loading the model
bool only_active_exps = true; // if true, offload only active experts (relevant only for hybrid CPU/GPU)
bool merge_qkv = false; // if true, merge separate Q, K, V tensors into a single, contiguous tensor
bool merge_up_gate_exps = false; // if true, merge ffn_up_exps and ffn_gate_exps into a single, contiguous tensor
bool k_cache_hadamard = false; // if true, use Hadamard transform for the K-cache (only makes sense with quantized cache)
bool split_mode_graph_scheduling = false; // if true, force split mode graph scheduling
bool split_mode_f16 = true; // if true, intermediate results will be cast to f16 before copying to other GPUs to perform reduce ops
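For context on the comment above: merging `ffn_up_exps` and `ffn_gate_exps` into one contiguous tensor lets a single indirect matmul produce both MoE projections per layer instead of two. A conceptual ggml-style sketch, assuming a merged tensor of shape [n_embd, 2*n_ff, n_expert], an up-then-gate ordering, and SiLU activation; these are illustrative assumptions, not necessarily this PR's actual graph code:

```cpp
#include "ggml.h"

// One fused expert matmul for both the up and gate projections, then a
// split back into the two halves along dim 0 and a SwiGLU combine.
static struct ggml_tensor * ffn_up_gate_merged(
        struct ggml_context * ctx,
        struct ggml_tensor  * up_gate_exps, // assumed [n_embd, 2*n_ff, n_expert]
        struct ggml_tensor  * cur,          // token states [n_embd, n_tokens]
        struct ggml_tensor  * ids,          // selected expert ids per token
        int64_t               n_ff) {
    // single indirect matmul over the merged tensor
    struct ggml_tensor * up_gate = ggml_mul_mat_id(ctx, up_gate_exps, cur, ids);
    // first n_ff rows: up projection (the ordering is an assumption)
    struct ggml_tensor * up   = ggml_view_3d(ctx, up_gate, n_ff,
            up_gate->ne[1], up_gate->ne[2], up_gate->nb[1], up_gate->nb[2], 0);
    // second n_ff rows: gate projection
    struct ggml_tensor * gate = ggml_view_3d(ctx, up_gate, n_ff,
            up_gate->ne[1], up_gate->ne[2], up_gate->nb[1], up_gate->nb[2],
            n_ff*ggml_element_size(up_gate));
    // SwiGLU: silu(gate) * up (views made contiguous before the unary op)
    return ggml_mul(ctx, ggml_silu(ctx, ggml_cont(ctx, gate)), ggml_cont(ctx, up));
}
```

The expected win is fewer, larger kernel launches and one pass over the expert routing data per layer rather than two.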
35 changes: 31 additions & 4 deletions examples/llama-bench/llama-bench.cpp
@@ -268,6 +268,7 @@ struct cmd_params {
bool use_thp = false;
bool no_ooae = false;
bool mqkv = false;
bool muge = false;
bool rcache = false;
output_formats output_format;
output_formats output_format_stderr;
@@ -293,7 +294,7 @@ static const cmd_params cmd_params_defaults = {
/* mla_attn */ {3},
/* attn_max_batch */ {0},
/* ser */ {{-1,0.0f}},
/* reuse */ {false},
/* reuse */ {true},
/* tensor_split */ {std::vector<float>(llama_max_devices(), 0.0f)},
/* use_mmap */ {true},
/* embeddings */ {false},
@@ -310,6 +311,7 @@ static const cmd_params cmd_params_defaults = {
/* use_thp */ false,
/* no_ooae */ false,
/* mqkv */ false,
/* muge */ false,
/* rcache */ false,
/* output_format */ MARKDOWN,
/* output_format_stderr */ NONE,
@@ -354,6 +356,7 @@ static void print_usage(int /* argc */, char ** argv) {
printf(" -rtr, --run-time-repack <0|1> (default: %s)\n", cmd_params_defaults.repack ? "1" : "0");
printf(" -cuda, --cuda-params <string> (default: %s)\n", cmd_params_defaults.cuda_params.c_str());
printf(" -mqkv, --merge-qkv (default: %s)\n", cmd_params_defaults.mqkv ? "1" : "0");
printf(" -muge, --merge-up-gate-experts (default: %s)\n", cmd_params_defaults.muge ? "1" : "0");
printf(" -rcache, --rope-cache (default: %s)\n", cmd_params_defaults.rcache ? "1" : "0");
printf(" -thp, --transparent-huge-pages <0|1> (default: %s)\n", cmd_params_defaults.use_thp? "1" : "0");
printf(" -ot, --override-tensor pattern (default: none)\n");
@@ -789,6 +792,12 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
break;
}
params.mqkv = std::stoi(argv[i]);
} else if (arg == "-muge" || arg == "--merge-up-gate-exps") {
if (++i >= argc) {
invalid_param = true;
break;
}
params.muge = std::stoi(argv[i]);
} else if (arg == "-rcache" || arg == "--rope-cache") {
if (++i >= argc) {
invalid_param = true;
@@ -926,6 +935,7 @@ struct cmd_params_instance {
bool use_thp = false;
bool no_ooae = false;
bool mqkv = false;
bool muge = false;
bool rcache = false;
const llama_model_tensor_buft_override* buft_overrides;

@@ -943,6 +953,7 @@
mparams.repack_tensors = repack;
mparams.use_thp = use_thp;
mparams.merge_qkv = mqkv;
mparams.merge_up_gate_exps = muge;
mparams.tensor_buft_overrides = buft_overrides;
mparams.mla = mla_attn;

@@ -958,6 +969,7 @@
use_mmap == other.use_mmap &&
repack == other.repack &&
mqkv == other.mqkv &&
muge == other.muge &&
use_thp == other.use_thp &&
tensor_split == other.tensor_split;
}
@@ -1047,6 +1059,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .use_thp = */ params.use_thp,
/* .no_ooae = */ params.no_ooae,
/* .mqkv = */ params.mqkv,
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
@@ -1088,6 +1101,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .use_thp = */ params.use_thp,
/* .no_ooae = */ params.no_ooae,
/* .mqkv = */ params.mqkv,
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
@@ -1129,6 +1143,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .use_thp = */ params.use_thp,
/* .no_ooae = */ params.no_ooae,
/* .mqkv = */ params.mqkv,
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
@@ -1170,6 +1185,7 @@ static std::vector<cmd_params_instance> get_cmd_params_instances(const cmd_param
/* .use_thp = */ params.use_thp,
/* .no_ooae = */ params.no_ooae,
/* .mqkv = */ params.mqkv,
/* .muge = */ params.muge,
/* .rcache = */ params.rcache,
/* .buft_overrides=*/ params.buft_overrides.data(),
};
@@ -1222,6 +1238,7 @@ struct test {
bool use_thp = false;
bool no_ooae = false;
bool mqkv = false;
bool muge = false;
bool rcache = false;
std::string override_tensor;
int n_prompt;
@@ -1259,6 +1276,7 @@ struct test {
embeddings = inst.embeddings;
repack = inst.repack;
mqkv = inst.mqkv;
muge = inst.muge;
fmoe = inst.fmoe;
ger = inst.ger;
rcache = inst.rcache;
@@ -1368,7 +1386,7 @@ struct test {
"n_threads", "type_k", "type_v",
"n_gpu_layers", "split_mode",
"main_gpu", "no_kv_offload", "flash_attn", "mla_attn", "attn_max_batch", "ser", "reuse",
"tensor_split", "use_mmap", "embeddings", "repack", "mqkv", "fused_moe", "grouped_er",
"tensor_split", "use_mmap", "embeddings", "repack", "mqkv", "muge", "fused_moe", "grouped_er",
"no_fused_up_gate", "use_thp", "no_ooae", "rcache", "cuda_params", "override_tensor",
"n_prompt", "n_gen", "test_time",
"avg_ns", "stddev_ns",
@@ -1392,7 +1410,7 @@ struct test {
field == "gpu_blas" || field == "blas" || field == "sycl" || field == "no_kv_offload" ||
field == "flash_attn" || field == "use_mmap" || field == "embeddings" || field == "repack" || field == "use_thp" ||
field == "fused_moe" || field == "grouped_er" || field == "no_fused_up_gate" || field == "no_ooae" || field == "mqkv" ||
field == "rcache" || field == "reuse") {
field == "rcache" || field == "reuse" || field == "muge") {
return BOOL;
}
if (field == "avg_ts" || field == "stddev_ts") {
@@ -1435,7 +1453,7 @@ struct test {
std::to_string(main_gpu), std::to_string(no_kv_offload), std::to_string(flash_attn),
std::to_string(mla_attn), std::to_string(attn_max_batch), ser_to_string(ser), std::to_string(reuse),
tensor_split_str, std::to_string(use_mmap), std::to_string(embeddings),
std::to_string(repack), std::to_string(mqkv), std::to_string(fmoe), std::to_string(ger),
std::to_string(repack), std::to_string(mqkv), std::to_string(muge), std::to_string(fmoe), std::to_string(ger),
std::to_string(no_fug), std::to_string(use_thp), std::to_string(no_ooae), std::to_string(rcache),
cuda_params, override_tensor,
std::to_string(n_prompt), std::to_string(n_gen), test_time,
@@ -1621,6 +1639,9 @@ struct markdown_printer : public printer {
if (field == "mqkv") {
return 4;
}
if (field == "muge") {
return 4;
}
if (field == "use_thp") {
return 3;
}
@@ -1688,6 +1709,9 @@ struct markdown_printer : public printer {
if (field == "mqkv") {
return "mqkv";
}
if (field == "muge") {
return "muge";
}
if (field == "use_thp") {
return "thp";
}
@@ -1791,6 +1815,9 @@ struct markdown_printer : public printer {
if (params.mqkv != cmd_params_defaults.mqkv) {
fields.emplace_back("mqkv");
}
if (params.muge != cmd_params_defaults.muge) {
fields.emplace_back("muge");
}
if (params.use_thp != cmd_params_defaults.use_thp) {
fields.emplace_back("use_thp");
}
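One reason `muge` is threaded through `cmd_params_instance::equal_mparams` and the `test` record above: llama-bench reloads the model only when model-level parameters change between consecutive test instances, so any flag that affects model loading must take part in that equality check. A hypothetical reduction of that reuse loop (sketch names, not the actual bench code):

```cpp
#include <vector>

struct instance_sketch {
    bool mqkv = false;
    bool muge = false;
    // model-level parameters only; run-level ones are deliberately excluded
    bool equal_mparams(const instance_sketch & other) const {
        return mqkv == other.mqkv && muge == other.muge;
    }
};

static void run_all(const std::vector<instance_sketch> & instances) {
    const instance_sketch * prev = nullptr;
    for (const auto & inst : instances) {
        if (prev == nullptr || !prev->equal_mparams(inst)) {
            // (re)load the model with inst's parameters here
        }
        // ... run the benchmark for inst ...
        prev = &inst;
    }
}
```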