Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion .github/workflows/tests-integration.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,7 @@ jobs:
steps:
- name: Set expected providers for integration tests
id: set_providers
run: echo "expected_providers=anthropic,bedrock,fireworks,gemini,huggingface,llama,mistral,nebius,openai,voyage,watsonx,xai,openrouter,cerebras" >> $GITHUB_OUTPUT
run: echo "expected_providers=anthropic,bedrock,cerebras,fireworks,gemini,huggingface,llama,mistral,nebius,openai,openrouter,perplexity,voyage,watsonx,xai" >> $GITHUB_OUTPUT

- name: Set expected providers for local integration tests
id: set_local_providers
Expand Down Expand Up @@ -69,6 +69,7 @@ jobs:
XAI_API_KEY: ${{ secrets.XAI_API_KEY }}
OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }}
CEREBRAS_API_KEY: ${{ secrets.CEREBRAS_API_KEY }}
PERPLEXITY_API_KEY: ${{ secrets.PERPLEXITY_API_KEY }}
EXPECTED_PROVIDERS: ${{ needs.expected-providers.outputs.providers }}
INCLUDE_LOCAL_PROVIDERS: "false"
run: |
Expand Down
3 changes: 3 additions & 0 deletions src/any_llm/providers/openai/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -68,6 +68,9 @@ def _convert_completion_chunk_response(response: Any, **kwargs: Any) -> ChatComp
)
response.created = int(response.created)
normalized_chunk = _normalize_openai_dict_response(response.model_dump())
# Some APIs (e.g. Perplexity) return `object: "chat.completion"` even on streamed chunks,
# missing the `.chunk` suffix. Hardcode it, since openai's type expects the exact literal.
normalized_chunk["object"] = "chat.completion.chunk"
return ChatCompletionChunk.model_validate(normalized_chunk)
# If it's already our ChatCompletionChunk type, return it
if isinstance(response, ChatCompletionChunk):
Expand Down
2 changes: 2 additions & 0 deletions src/any_llm/providers/perplexity/perplexity.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,4 +15,6 @@ class PerplexityProvider(BaseOpenAIProvider):
SUPPORTS_COMPLETION = True
SUPPORTS_RESPONSES = False
SUPPORTS_COMPLETION_REASONING = False
SUPPORTS_COMPLETION_PDF = False
SUPPORTS_EMBEDDING = False
SUPPORTS_LIST_MODELS = False
2 changes: 1 addition & 1 deletion tests/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def provider_model_map() -> dict[LLMProvider, str]:
LLMProvider.LLAMA: "Llama-4-Maverick-17B-128E-Instruct-FP8",
LLMProvider.AZURE: "openai/gpt-4.1-nano",
LLMProvider.AZUREOPENAI: "azure/<your_deployment_name>",
LLMProvider.PERPLEXITY: "llama-3.1-sonar-small-128k-chat",
LLMProvider.PERPLEXITY: "sonar",
LLMProvider.OPENROUTER: "meta-llama/llama-3.3-8b-instruct:free",
LLMProvider.LLAMACPP: "N/A",
}
Expand Down
4 changes: 2 additions & 2 deletions tests/integration/test_tool.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,8 +20,8 @@ async def test_tool(
provider_client_config: dict[LLMProvider, dict[str, Any]],
) -> None:
"""Test that all supported providers can be loaded successfully."""
if provider == LLMProvider.LLAMAFILE:
pytest.skip("Llamafile does not support tools, skipping")
if provider in (LLMProvider.LLAMAFILE, LLMProvider.PERPLEXITY):
pytest.skip(f"{provider} does not support tools, skipping")

def echo(message: str) -> str:
"""Tool function to get the capital of a city."""
Expand Down