
Commit aca6733

Fix errors
1 parent adaf33e commit aca6733

File tree

23 files changed (+212, -685 lines)


mistralrs-core/src/models/deepseek2.rs

Lines changed: 0 additions & 2 deletions
@@ -93,8 +93,6 @@ pub struct DeepSeekV2Config {
     pub(crate) kv_lora_rank: usize,
     pub(crate) v_head_dim: usize,
     pub(crate) qk_nope_head_dim: usize,
-    #[serde(default = "use_flash_attn_default")]
-    pub(crate) use_flash_attn: bool,
     pub(crate) quantization_config: Option<QuantizedConfig>,
     pub(crate) n_group: usize,
     pub(crate) topk_group: usize,
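
The removed field used serde's default-function pattern: the function named in #[serde(default = "...")] supplies the value whenever the key is missing from config.json. The sketch below is a minimal, self-contained illustration of that pattern, not code from this repository; ExampleConfig, the JSON string, and main are hypothetical.

use serde::Deserialize;

// Hypothetical default function, mirroring the removed use_flash_attn_default.
fn use_flash_attn_default() -> bool {
    false
}

#[derive(Debug, Deserialize)]
struct ExampleConfig {
    kv_lora_rank: usize,
    // If "use_flash_attn" is absent from the JSON, serde calls the function above.
    #[serde(default = "use_flash_attn_default")]
    use_flash_attn: bool,
}

fn main() -> Result<(), serde_json::Error> {
    let cfg: ExampleConfig = serde_json::from_str(r#"{ "kv_lora_rank": 512 }"#)?;
    assert!(!cfg.use_flash_attn); // filled in by the default function
    println!("{cfg:?}");
    Ok(())
}

Because serde ignores unknown keys unless a struct opts into deny_unknown_fields, a config.json that still contains "use_flash_attn" should continue to deserialize after the field's removal; the key is simply dropped.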

mistralrs-core/src/models/deepseek3.rs

Lines changed: 0 additions & 2 deletions
@@ -93,8 +93,6 @@ pub struct DeepSeekV3Config {
     pub(crate) kv_lora_rank: usize,
     pub(crate) v_head_dim: usize,
     pub(crate) qk_nope_head_dim: usize,
-    #[serde(default = "use_flash_attn_default")]
-    pub(crate) use_flash_attn: bool,
     pub(crate) quantization_config: Option<QuantizedConfig>,
     pub(crate) n_group: usize,
     pub(crate) topk_group: usize,

mistralrs-core/src/models/gemma.rs

Lines changed: 0 additions & 1 deletion
@@ -50,7 +50,6 @@ pub struct Config {

     #[serde(default = "default_max_position_embeddings")]
     pub max_position_embeddings: usize,
-    pub use_flash_attn: bool,
     pub quantization_config: Option<QuantizedConfig>,
     #[serde(default = "word_emb_default")]
     #[allow(dead_code)]

mistralrs-core/src/models/gemma2.rs

Lines changed: 1 addition & 2 deletions
@@ -24,7 +24,7 @@ use crate::{
     utils::{progress::NiceProgressBar, unvarbuilder::UnVarBuilder},
 };

-#[derive(Debug, Clone, Default, serde::Serialize)]
+#[derive(Debug, Clone, Default, serde::Deserialize)]
 pub struct Config {
     pub attention_bias: bool,
     pub head_dim: usize,
@@ -45,7 +45,6 @@ pub struct Config {
     pub query_pre_attn_scalar: usize,
     pub max_position_embeddings: usize,
     pub quantization_config: Option<QuantizedConfig>,
-    pub use_flash_attn: bool,
     #[allow(dead_code)]
     pub tie_word_embeddings: bool,
 }
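
The one change here that is not a deletion swaps serde::Serialize for serde::Deserialize on gemma2's Config. A plausible reading (not stated in the commit) is that this Config, like the others, is parsed from a model's config.json, which requires Deserialize; deriving only Serialize would not compile against serde_json::from_str. A minimal sketch with a hypothetical, trimmed-down struct:

use serde::Deserialize;

// Illustrative stand-in for the gemma2 Config; the field set is abbreviated.
#[derive(Debug, Clone, Default, Deserialize)]
struct Gemma2LikeConfig {
    attention_bias: bool,
    head_dim: usize,
    max_position_embeddings: usize,
}

fn main() -> Result<(), serde_json::Error> {
    let json = r#"{ "attention_bias": false, "head_dim": 256, "max_position_embeddings": 8192 }"#;
    // from_str needs the Deserialize impl provided by the derive above.
    let cfg: Gemma2LikeConfig = serde_json::from_str(json)?;
    println!("{cfg:?}");
    Ok(())
}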

mistralrs-core/src/models/llama.rs

Lines changed: 0 additions & 2 deletions
@@ -41,8 +41,6 @@ pub struct Config {
     pub num_hidden_layers: usize,
     pub num_attention_heads: usize,
     pub num_key_value_heads: usize,
-    #[serde(default = "use_flash_attn_default")]
-    pub use_flash_attn: bool,
     pub rms_norm_eps: f64,
     pub rope_theta: f32,
     pub max_position_embeddings: usize,

mistralrs-core/src/models/mistral.rs

Lines changed: 0 additions & 2 deletions
@@ -42,8 +42,6 @@ pub struct Config {
     pub(crate) rms_norm_eps: f64,
     pub(crate) rope_theta: f64,
     pub(crate) sliding_window: Option<usize>,
-    #[serde(default = "use_flash_attn")]
-    pub(crate) use_flash_attn: bool,
     pub(crate) head_dim: Option<usize>,
     pub(crate) quantization_config: Option<QuantizedConfig>,
     #[serde(default = "tie_word_embeddings")]

mistralrs-core/src/models/mixtral.rs

Lines changed: 0 additions & 1 deletion
@@ -45,7 +45,6 @@ pub struct Config {
     pub(crate) sliding_window: Option<usize>,
     pub(crate) num_experts_per_tok: usize,
     pub(crate) num_local_experts: usize,
-    pub(crate) use_flash_attn: bool,
     pub(crate) quantization_config: Option<QuantizedConfig>,
     #[serde(default = "word_emb_default")]
     pub(crate) tie_word_embeddings: bool,

mistralrs-core/src/models/phi2.rs

Lines changed: 0 additions & 1 deletion
@@ -51,7 +51,6 @@ pub struct Config {
     pub(crate) rope_theta: f32,
     pub(crate) partial_rotary_factor: f64,
     pub(crate) qk_layernorm: bool,
-    pub(crate) use_flash_attn: bool,
     pub(crate) quantization_config: Option<QuantizedConfig>,
     #[serde(default = "word_emb_default")]
     pub(crate) tie_word_embeddings: bool,

mistralrs-core/src/models/phi3.rs

Lines changed: 0 additions & 1 deletion
@@ -47,7 +47,6 @@ pub struct Config {
     pub eos_token_id: Option<u32>,
     pub rope_scaling: Option<PhiRopeScalingConfig>,
     pub max_position_embeddings: usize,
-    pub use_flash_attn: bool,
     pub sliding_window: Option<usize>,
     pub original_max_position_embeddings: usize,
     pub quantization_config: Option<QuantizedConfig>,

mistralrs-core/src/models/phi3_5_moe.rs

Lines changed: 0 additions & 1 deletion
@@ -43,7 +43,6 @@ pub struct Config {
     pub(crate) rope_theta: f64,
     pub(crate) rope_scaling: Option<PhiRopeScalingConfig>,
     pub(crate) max_position_embeddings: usize,
-    pub(crate) use_flash_attn: bool,
     pub(crate) sliding_window: Option<usize>,
     pub(crate) original_max_position_embeddings: usize,
     pub(crate) quantization_config: Option<QuantizedConfig>,
