Skip to content
Merged
Show file tree
Hide file tree
Changes from 3 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
24 changes: 23 additions & 1 deletion src/any_llm/providers/fireworks/fireworks.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,9 @@
from any_llm.provider import Provider
from any_llm.providers.helpers import create_completion_from_response
from any_llm.providers.fireworks.utils import _create_openai_chunk_from_fireworks_chunk
from any_llm.types.responses import Response, ResponseStreamEvent

from openai import Stream,OpenAI

class FireworksProvider(Provider):
PROVIDER_NAME = "Fireworks"
Expand All @@ -20,7 +22,7 @@ class FireworksProvider(Provider):

SUPPORTS_COMPLETION_STREAMING = True
SUPPORTS_COMPLETION = True
SUPPORTS_RESPONSES = False
SUPPORTS_RESPONSES = True
SUPPORTS_COMPLETION_REASONING = False
SUPPORTS_EMBEDDING = False

Expand Down Expand Up @@ -74,3 +76,23 @@ def _make_api_call(
provider_name="Fireworks",
model=model,
)

def responses(self, model: str, input_data, **kwargs) -> Response | Iterator[ResponseStreamEvent]:
    """Call the Fireworks Responses API through its OpenAI-compatible endpoint.

    Args:
        model: Fireworks model identifier to run the request against.
        input_data: Input payload forwarded as the ``input`` argument of
            ``client.responses.create`` (string or structured input; shape is
            defined by the OpenAI Responses API — not validated here).
        **kwargs: Additional keyword arguments passed straight through to
            ``client.responses.create`` (e.g. ``stream=True``).

    Returns:
        A ``Response`` for a non-streaming call, or a stream of
        ``ResponseStreamEvent`` items when streaming was requested.

    Raises:
        ValueError: If the client returns something other than a
            ``Response`` or a ``Stream``.
    """
    client = OpenAI(
        base_url="https://api.fireworks.ai/inference/v1",
        api_key=self.config.api_key,
    )

    response = client.responses.create(
        model=model,
        input=input_data,
        **kwargs,
    )

    # Guard against unexpected SDK return types before handing the object
    # back to callers that expect the Response/stream union.
    if not isinstance(response, (Response, Stream)):
        raise ValueError(f"Responses API returned an unexpected type: {type(response)}")

    # No re-validation: any_llm's Response is an alias of the OpenAI type,
    # so the object is already the right type. Re-validating via
    # model_dump() would also crash on the streaming path, because Stream
    # is not a pydantic model and has no model_dump().
    return response
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Since currently the Any LLM response type is an alias to the OpenAI type, you may actually be able to make mypy happy by removing the model_validate line.

Suggested change
response = Response.model_validate(response.model_dump())
return response
return response

2 changes: 1 addition & 1 deletion src/any_llm/providers/fireworks/utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -53,4 +53,4 @@ def _create_openai_chunk_from_fireworks_chunk(fireworks_chunk: Any) -> ChatCompl
model=getattr(fireworks_chunk, "model", "unknown"),
object="chat.completion.chunk",
usage=usage,
)
)
Loading