37 changes: 32 additions & 5 deletions tests/models/decoder_only/language/test_mistral.py
@@ -5,6 +5,7 @@
 import pytest
 
 from vllm import SamplingParams
+from vllm import LLM
 
 from ...utils import check_logprobs_close
 
@@ -16,6 +17,10 @@
 ]
 
 SAMPLING_PARAMS = SamplingParams(max_tokens=512, temperature=0.0, logprobs=5)
+SYMBOLIC_LANG_PROMPTS = [
+    "勇敢な船乗りについての詩を書く",  # Japanese: "Write a poem about a brave sailor"
+    "寫一首關於勇敢的水手的詩",  # Chinese: "Write a poem about a brave sailor"
+]
 
 # for function calling
 TOOLS = [{
@@ -60,7 +65,7 @@
     '{"city": "Dallas", "state": "TX", "unit": "fahrenheit"}}]')
 
 
-@pytest.mark.parametrize("model", MODELS)
+@pytest.mark.parametrize("model", MODELS[2:])
 @pytest.mark.parametrize("dtype", ["bfloat16"])
 @pytest.mark.parametrize("max_tokens", [64])
 @pytest.mark.parametrize("num_logprobs", [5])
@@ -74,15 +79,15 @@ def test_models(
     num_logprobs: int,
 ) -> None:
     # TODO(sang): Sliding window should be tested separately.
-    with hf_runner(model, dtype=dtype) as hf_model:
-        hf_outputs = hf_model.generate_greedy_logprobs_limit(
-            example_prompts, max_tokens, num_logprobs)
-
     with vllm_runner(model, dtype=dtype,
                      tokenizer_mode="mistral") as vllm_model:
         vllm_outputs = vllm_model.generate_greedy_logprobs(
             example_prompts, max_tokens, num_logprobs)
 
+    with hf_runner(model, dtype=dtype) as hf_model:
+        hf_outputs = hf_model.generate_greedy_logprobs_limit(
+            example_prompts, max_tokens, num_logprobs)
+
     check_logprobs_close(
         outputs_0_lst=hf_outputs,
         outputs_1_lst=vllm_outputs,
@@ -131,6 +136,28 @@ def test_mistral_format(
     )
 
 
+@pytest.mark.parametrize("model", MODELS[1:])
+@pytest.mark.parametrize("dtype", ["bfloat16"])
+@pytest.mark.parametrize("max_tokens", [64])
+@pytest.mark.parametrize("num_logprobs", [5])
+@pytest.mark.parametrize("prompt", SYMBOLIC_LANG_PROMPTS)
+def test_mistral_symbolic_languages(
+    vllm_runner,
+    model: str,
+    dtype: str,
+    max_tokens: int,
+    num_logprobs: int,
+    prompt: str,
+) -> None:
+    msg = {"role": "user", "content": prompt}
+    llm = LLM(model=model,
+              tokenizer_mode="mistral",
+              config_format="mistral",
+              load_format="mistral")
+    outputs = llm.chat([msg], sampling_params=SAMPLING_PARAMS)
+    assert "�" not in outputs[0].outputs[0].text.strip()
+
+
 @pytest.mark.parametrize("dtype", ["bfloat16"])
 @pytest.mark.parametrize("model", MODELS[1:])  # v1 can't do func calling
 def test_mistral_function_calling(
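For context on the assertion in the new test: "�" is U+FFFD, the Unicode replacement character that Python substitutes for byte sequences it cannot decode as UTF-8. A minimal, self-contained illustration using only the standard library (not part of the PR):

# Illustration only (not part of the PR): truncating a UTF-8 byte stream
# mid-character and decoding with errors="replace" yields U+FFFD ("�"),
# which is exactly what the new test asserts must not appear in the output.
text = "勇敢な船乗り"              # "a brave sailor", as in SYMBOLIC_LANG_PROMPTS
data = text.encode("utf-8")        # 18 bytes: each of the 6 characters is 3 bytes

broken = data[:-1].decode("utf-8", errors="replace")
assert broken.endswith("\ufffd")   # "\ufffd" is the literal "�" character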
32 changes: 29 additions & 3 deletions vllm/transformers_utils/tokenizers/mistral.py
@@ -175,10 +175,29 @@ def apply_chat_template(self,
 
     def convert_tokens_to_string(self, tokens: List[str]) -> str:
         if isinstance(self.tokenizer, Tekkenizer):
-            return "".join(t for t in tokens
-                           if t not in self.tokenizer._all_special_tokens)
+            tokens = [
+                t for t in tokens
+                if t not in self.tokenizer._all_special_tokens
+            ]
+
+            if any(isinstance(t, bytes) for t in tokens):
+                # some tokens are raw bytes: re-map all to ids and decode once
+                shift = self.tokenizer.num_special_tokens
+                byte_tokens = [
+                    t.encode("utf-8") if not isinstance(t, bytes) else t
+                    for t in tokens
+                ]
+                ids = [
+                    self.tokenizer._tekken_token2id_nospecial[t] + shift
+                    for t in byte_tokens
+                ]
+                decoded = self.tokenizer.decode(ids)
+            else:
+                decoded = "".join(tokens)
         else:
-            return self.tokenizer.decode(tokens)  # type: ignore[arg-type]
+            decoded = self.tokenizer.decode(tokens)  # type: ignore[arg-type]
+
+        return decoded
 
     def decode(self, ids: Union[List[int], int]) -> str:
         if isinstance(ids, int):
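Why the byte branch above is needed: as the diff shows, Tekkenizer pieces can be raw bytes, so a single multi-byte character may be split across pieces that are not valid UTF-8 on their own. Joining string pieces bakes replacement characters into the text, while decoding the reassembled byte sequence in one pass recovers the character. A rough standard-library sketch of that effect (it does not use mistral_common; the real path goes through the ids as shown above):

# Sketch of the underlying byte-level effect only; no mistral_common needed.
char = "詩"                        # one CJK character = 3 UTF-8 bytes
pieces = [char.encode("utf-8")[:2], char.encode("utf-8")[2:]]  # split mid-character

# Decoding piece by piece (what naive string joining amounts to) is lossy:
joined = "".join(p.decode("utf-8", errors="replace") for p in pieces)
assert "\ufffd" in joined          # replacement characters appear

# Decoding the concatenated bytes once recovers the original character,
# which is what mapping pieces back to ids and calling decode() achieves:
assert b"".join(pieces).decode("utf-8") == char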
@@ -200,4 +219,11 @@ def convert_ids_to_tokens(
                               self.tokenizer)
 
         tokens = [self.tokenizer.id_to_piece(id) for id in ids]
+
+        if any(t.strip() == "�" for t in tokens):
+            # if a stripped piece is the replacement character, its string
+            # form is lossy (invalid UTF-8), so fall back to byte pieces
+            # See: https://github.com/vllm-project/vllm/pull/8640
+            tokens = [self.tokenizer.id_to_byte_piece(id) for id in ids]
+
         return tokens
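The convert_ids_to_tokens change applies the same idea one step earlier: if any string piece is lossy (its stripped form equals U+FFFD), every id is re-mapped to its byte piece so convert_tokens_to_string can reassemble valid UTF-8 later. A hypothetical mock of that control flow; id_to_piece and id_to_byte_piece below are local stand-ins, not the real Tekkenizer methods:

# Hypothetical stand-ins (assumption: the real Tekkenizer methods of the same
# names behave analogously; this mock only illustrates the fallback logic).
STR_PIECES = {7: "hi", 8: "\ufffd"}        # id 8 has no lossless string form
BYTE_PIECES = {7: b"hi", 8: b"\xe8\xa9"}   # ...but its raw bytes are known

def id_to_piece(token_id: int) -> str:
    return STR_PIECES[token_id]

def id_to_byte_piece(token_id: int) -> bytes:
    return BYTE_PIECES[token_id]

ids = [7, 8]
tokens = [id_to_piece(i) for i in ids]

# Same heuristic as the diff: a stripped piece equal to U+FFFD means the
# string form is lossy, so every id falls back to its byte piece.
if any(t.strip() == "\ufffd" for t in tokens):
    tokens = [id_to_byte_piece(i) for i in ids]

print(tokens)  # [b'hi', b'\xe8\xa9'], bytes preserved for later detokenization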