Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 14 additions & 1 deletion common/arg.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1283,6 +1283,16 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
params.swa_full = true;
}
).set_env("LLAMA_ARG_SWA_FULL"));
// Registers the -mtp / --multi-token-prediction CLI flag.
// Setting it enables MTP weight loading (params.mtp = true) and, when the
// user has not already selected a speculative decoding type, defaults the
// type to COMMON_SPECULATIVE_TYPE_MTP — so the single flag is sufficient on
// its own, while an explicit --spec-type choice is never overridden.
// set_env makes LLAMA_ARG_MTP the environment-variable equivalent of the flag.
add_opt(common_arg(
{"-mtp", "--multi-token-prediction"},
"enable multi-token prediction (load MTP weights and auto-enable MTP speculative)",
[](common_params & params) {
params.mtp = true;
// only auto-select MTP speculative when no type was chosen yet
if (params.speculative.type == COMMON_SPECULATIVE_TYPE_NONE) {
params.speculative.type = COMMON_SPECULATIVE_TYPE_MTP;
}
}
).set_env("LLAMA_ARG_MTP"));
add_opt(common_arg(
{"-ctxcp", "--ctx-checkpoints", "--swa-checkpoints"}, "N",
string_format("max number of context checkpoints to create per slot (default: %d)"
Expand Down Expand Up @@ -3463,7 +3473,7 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
}
).set_examples({LLAMA_EXAMPLE_SPECULATIVE, LLAMA_EXAMPLE_SERVER, LLAMA_EXAMPLE_CLI}));
add_opt(common_arg(
{"--spec-type"}, "[none|ngram-cache|ngram-simple|ngram-map-k|ngram-map-k4v|ngram-mod]",
{"--spec-type"}, "[none|ngram-cache|ngram-simple|ngram-map-k|ngram-map-k4v|ngram-mod|mtp]",
string_format("type of speculative decoding to use when no draft model is provided (default: %s)\n",
common_speculative_type_to_str(params.speculative.type).c_str()),
[](common_params & params, const std::string & value) {
Expand All @@ -3479,6 +3489,9 @@ common_params_context common_params_parser_init(common_params & params, llama_ex
params.speculative.type = COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V;
} else if (value == "ngram-mod") {
params.speculative.type = COMMON_SPECULATIVE_TYPE_NGRAM_MOD;
} else if (value == "mtp") {
params.speculative.type = COMMON_SPECULATIVE_TYPE_MTP;
params.mtp = true;
} else {
throw std::invalid_argument("unknown speculative decoding type without draft model");
}
Expand Down
4 changes: 3 additions & 1 deletion common/common.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -1343,12 +1343,14 @@ struct llama_model_params common_model_params_to_llama(common_params & params) {

mparams.progress_callback = params.load_progress_callback;
mparams.progress_callback_user_data = params.load_progress_callback_user_data;
mparams.mtp = params.mtp;

return mparams;
}

struct llama_context_params common_context_params_to_llama(const common_params & params) {
auto cparams = llama_context_default_params();
const bool mtp_needs_hidden_states = params.speculative.type == COMMON_SPECULATIVE_TYPE_MTP;

cparams.n_ctx = params.n_ctx;
cparams.n_seq_max = params.n_parallel;
Expand All @@ -1357,7 +1359,7 @@ struct llama_context_params common_context_params_to_llama(const common_params &
cparams.n_threads = params.cpuparams.n_threads;
cparams.n_threads_batch = params.cpuparams_batch.n_threads == -1 ?
params.cpuparams.n_threads : params.cpuparams_batch.n_threads;
cparams.embeddings = params.embedding;
cparams.embeddings = params.embedding || mtp_needs_hidden_states;
cparams.rope_scaling_type = params.rope_scaling_type;
cparams.rope_freq_base = params.rope_freq_base;
cparams.rope_freq_scale = params.rope_freq_scale;
Expand Down
4 changes: 3 additions & 1 deletion common/common.h
Original file line number Diff line number Diff line change
Expand Up @@ -175,6 +175,7 @@ enum common_speculative_type {
COMMON_SPECULATIVE_TYPE_NGRAM_MAP_K4V, // self-speculative decoding with n-gram keys and 4 m-gram values
COMMON_SPECULATIVE_TYPE_NGRAM_MOD,
COMMON_SPECULATIVE_TYPE_NGRAM_CACHE, // self-speculative decoding with 3-level n-gram cache
COMMON_SPECULATIVE_TYPE_MTP, // multi-token prediction (uses same model, dedicated draft context)
COMMON_SPECULATIVE_TYPE_COUNT // number of types, unknown type
};

Expand Down Expand Up @@ -292,7 +293,7 @@ struct common_params_speculative {

llama_model * model_dft = nullptr; // a llama_model that can be shared by multiple speculative contexts

llama_context_params cparams_dft; // these are the parameters for the draft llama_context
llama_context_params cparams_dft = llama_context_default_params(); // these are the parameters for the draft llama_context

int32_t n_ctx = 0; // draft context size
int32_t n_gpu_layers = -1; // number of layers to store in VRAM for the draft model (-1 - use default)
Expand Down Expand Up @@ -499,6 +500,7 @@ struct common_params {
bool no_op_offload = false; // globally disable offload host tensor operations to device
bool no_extra_bufts = false; // disable extra buffer types (used for weight repacking)
bool no_host = false; // bypass host buffer allowing extra buffers to be used
bool mtp = false; // enable multi-token prediction

bool single_turn = false; // single turn chat conversation

Expand Down
Loading