
Commit dcf0935

fix(llamafile): Don't override __init__. (#457)
Only override `_verify_and_set_api_key`.
1 parent 02d8a2d commit dcf0935

File tree

1 file changed: +3 −7 lines changed


src/any_llm/providers/llamafile/llamafile.py

Lines changed: 3 additions & 7 deletions
@@ -1,4 +1,3 @@
-import os
 from collections.abc import AsyncIterator
 from typing import Any

@@ -20,17 +19,14 @@ class LlamafileProvider(BaseOpenAIProvider):
     SUPPORTS_COMPLETION_IMAGE = False
     SUPPORTS_COMPLETION_PDF = False

-    def __init__(self, config: ClientConfig) -> None:
-        """We don't use the Provider init because by default we don't require an API key."""
+    def _verify_and_set_api_key(self, config: ClientConfig) -> ClientConfig:
+        config.api_key = ""

-        self.url = config.api_base or os.getenv("LLAMAFILE_API_URL")
-        self.config = config
-        self.config.api_key = ""  # In order to be compatible with the OpenAI client, the API key cannot be None if the OPENAI_API_KEY environment variable is not set (which is the case for LlamaFile)
+        return config

     async def _acompletion(
         self, params: CompletionParams, **kwargs: Any
     ) -> ChatCompletion | AsyncIterator[ChatCompletionChunk]:
-        """Handle completion - extracted to avoid generator issues."""
         if params.response_format:
             msg = "response_format"
             raise UnsupportedParameterError(
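For context, this fix relies on the base provider's initializer delegating API-key handling to an overridable _verify_and_set_api_key hook, so a subclass like LlamafileProvider customizes only that step instead of replacing __init__ and re-doing the rest of the setup by hand. Below is a minimal, self-contained sketch of that pattern; the Provider base class, the ClientConfig fields, the base __init__ body, and the localhost URL are simplified assumptions for illustration, not any-llm's actual implementation.

from dataclasses import dataclass


@dataclass
class ClientConfig:
    # Simplified stand-in for a provider client config (assumed fields).
    api_base: str | None = None
    api_key: str | None = None


class Provider:
    """Sketch of a base provider whose __init__ calls an overridable
    API-key hook (assumed behavior, not the real any-llm code)."""

    def __init__(self, config: ClientConfig) -> None:
        # Subclasses customize key handling here instead of replacing
        # __init__, so any other base-class setup is never skipped.
        self.config = self._verify_and_set_api_key(config)

    def _verify_and_set_api_key(self, config: ClientConfig) -> ClientConfig:
        if not config.api_key:
            msg = "An API key is required for this provider."
            raise ValueError(msg)
        return config


class LlamafileProvider(Provider):
    # llamafile runs locally and needs no key, but an OpenAI-compatible
    # client rejects a None api_key, so set it to "" (as in the patch).
    def _verify_and_set_api_key(self, config: ClientConfig) -> ClientConfig:
        config.api_key = ""
        return config


# Usage: no API key supplied, yet construction succeeds for llamafile.
provider = LlamafileProvider(ClientConfig(api_base="http://localhost:8080/v1"))
print(repr(provider.config.api_key))  # -> ''

Overriding the hook rather than __init__ keeps the subclass from drifting out of sync with the base initializer, which is exactly the failure mode the removed __init__ override risked.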

0 commit comments
