diff --git a/.github/workflows/tests-integration.yaml b/.github/workflows/tests-integration.yaml index 9353ca49..53edc3bb 100644 --- a/.github/workflows/tests-integration.yaml +++ b/.github/workflows/tests-integration.yaml @@ -23,7 +23,7 @@ jobs: steps: - name: Set expected providers for integration tests id: set_providers - run: echo "expected_providers=anthropic,bedrock,fireworks,gemini,huggingface,llama,mistral,nebius,openai,voyage,watsonx,xai,openrouter,cerebras" >> $GITHUB_OUTPUT + run: echo "expected_providers=anthropic,bedrock,cerebras,fireworks,gemini,huggingface,llama,mistral,nebius,openai,openrouter,perplexity,voyage,watsonx,xai" >> $GITHUB_OUTPUT - name: Set expected providers for local integration tests id: set_local_providers @@ -69,6 +69,7 @@ jobs: XAI_API_KEY: ${{ secrets.XAI_API_KEY }} OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }} CEREBRAS_API_KEY: ${{ secrets.CEREBRAS_API_KEY }} + PERPLEXITY_API_KEY: ${{ secrets.PERPLEXITY_API_KEY }} EXPECTED_PROVIDERS: ${{ needs.expected-providers.outputs.providers }} INCLUDE_LOCAL_PROVIDERS: "false" run: | diff --git a/src/any_llm/providers/openai/base.py b/src/any_llm/providers/openai/base.py index 62948330..bba0bcc2 100644 --- a/src/any_llm/providers/openai/base.py +++ b/src/any_llm/providers/openai/base.py @@ -68,6 +68,9 @@ def _convert_completion_chunk_response(response: Any, **kwargs: Any) -> ChatComp ) response.created = int(response.created) normalized_chunk = _normalize_openai_dict_response(response.model_dump()) + # Some APIs (e.g. 
Perplexity) return `chat.completion` without the chunk + # We can hardcode it as openai expects a literal + normalized_chunk["object"] = "chat.completion.chunk" return ChatCompletionChunk.model_validate(normalized_chunk) # If it's already our ChatCompletionChunk type, return it if isinstance(response, ChatCompletionChunk): diff --git a/src/any_llm/providers/perplexity/perplexity.py b/src/any_llm/providers/perplexity/perplexity.py index 2317e2ef..40be40a1 100644 --- a/src/any_llm/providers/perplexity/perplexity.py +++ b/src/any_llm/providers/perplexity/perplexity.py @@ -15,4 +15,6 @@ class PerplexityProvider(BaseOpenAIProvider): SUPPORTS_COMPLETION = True SUPPORTS_RESPONSES = False SUPPORTS_COMPLETION_REASONING = False + SUPPORTS_COMPLETION_PDF = False SUPPORTS_EMBEDDING = False + SUPPORTS_LIST_MODELS = False diff --git a/tests/conftest.py b/tests/conftest.py index b0f678bb..693cf208 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -59,7 +59,7 @@ def provider_model_map() -> dict[LLMProvider, str]: LLMProvider.LLAMA: "Llama-4-Maverick-17B-128E-Instruct-FP8", LLMProvider.AZURE: "openai/gpt-4.1-nano", LLMProvider.AZUREOPENAI: "azure/", - LLMProvider.PERPLEXITY: "llama-3.1-sonar-small-128k-chat", + LLMProvider.PERPLEXITY: "sonar", LLMProvider.OPENROUTER: "meta-llama/llama-3.3-8b-instruct:free", LLMProvider.LLAMACPP: "N/A", } diff --git a/tests/integration/test_tool.py b/tests/integration/test_tool.py index e54dd6cc..d61c29cb 100644 --- a/tests/integration/test_tool.py +++ b/tests/integration/test_tool.py @@ -20,8 +20,8 @@ async def test_tool( provider_client_config: dict[LLMProvider, dict[str, Any]], ) -> None: """Test that all supported providers can be loaded successfully.""" - if provider == LLMProvider.LLAMAFILE: - pytest.skip("Llamafile does not support tools, skipping") + if provider in (LLMProvider.LLAMAFILE, LLMProvider.PERPLEXITY): + pytest.skip(f"{provider} does not support tools, skipping") def echo(message: str) -> str: """Tool function to get the 
capital of a city."""