A comprehensive, type-safe Rust client library for the OpenRouter API.
- 🤖 Chat Completions API: Full support for the /v1/chat/completions endpoint
- 🆕 Responses API (Beta): Support for the new /v1/responses endpoint with reasoning capabilities
- 🎭 Anthropic Messages API: Native support for Anthropic's Messages API format
- 🔢 Embeddings API: Text and multimodal embeddings support
- 📊 Models API: List, filter, and query available models
- 🏢 Providers API: List available model providers
- 📈 Generation API: Retrieve detailed generation metadata
- 🌊 Streaming: Real-time streaming responses using Server-Sent Events (SSE)
- 🔒 Type-Safe: Comprehensive types for all API requests and responses
- 🏗️ Builder Pattern: Ergonomic builder APIs for constructing requests
- ⚡ Async/Await: Fully async with tokio runtime
- 📦 Modular: Use only the features you need
- 🛠️ Error Handling: Detailed error types for different failure scenarios
Add this to your Cargo.toml:
[dependencies]
openrouter-rust = "0.1.0"

Or with specific features:
[dependencies]
openrouter-rust = { version = "0.1.0", features = ["chat", "streaming", "embeddings", "anthropic"] }

Available features:
- chat (default): Enable chat completions API (/v1/chat/completions)
- responses: Enable Responses API (/v1/responses)
- streaming (default): Enable streaming support
- embeddings: Enable embeddings API (/v1/embeddings)
- anthropic: Enable Anthropic Messages API (/v1/messages)
- providers (default): Enable providers API (/v1/providers)
- models (default): Enable models API (/v1/models, /v1/models/count, /v1/models/user)
- generations (default): Enable generation metadata API (/v1/generation)
use openrouter_rust::{OpenRouterClient, ChatCompletionBuilder};
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
// Create a client
let client = OpenRouterClient::builder()
.api_key("your-openrouter-api-key")
.build()?;
// Build and send a request
let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
.system_message("You are a helpful assistant.")
.user_message("What is the meaning of life?")
.temperature(0.7)
.build();
let response = client.chat_completion(request).await?;
if let Some(ref content) = response.choices[0].message.content {
println!("Response: {}", content);
}
Ok(())
}

use openrouter_rust::{OpenRouterClient, ChatCompletionBuilder};
let client = OpenRouterClient::builder()
.api_key(std::env::var("OPENROUTER_API_KEY")?)
.http_referer("https://your-site.com")
.x_title("My App")
.build()?;
let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
.system_message("You are a helpful coding assistant.")
.user_message("Write a function to reverse a string in Rust.")
.temperature(0.7)
.max_tokens(500)
.build();
let response = client.chat_completion(request).await?;
if let Some(ref content) = response.choices[0].message.content {
println!("{}", content);
}

use openrouter_rust::{OpenRouterClient, ChatCompletionBuilder};
use futures::StreamExt;
let client = OpenRouterClient::builder()
.api_key(std::env::var("OPENROUTER_API_KEY")?)
.build()?;
let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
.user_message("Write a story about a robot learning to paint.")
.stream(true)
.build();
let stream = client.chat_completion_stream(request).await?;
let mut stream = stream;
while let Some(result) = stream.next().await {
match result {
Ok(chunk) => {
for choice in &chunk.choices {
if let Some(ref content) = choice.delta.content {
print!("{}", content);
}
}
}
Err(e) => eprintln!("Error: {}", e),
}
}

use openrouter_rust::{OpenRouterClient, anthropic::AnthropicMessageBuilder};
let client = OpenRouterClient::builder()
.api_key(std::env::var("OPENROUTER_API_KEY")?)
.build()?;
let request = AnthropicMessageBuilder::new("anthropic/claude-3.5-sonnet", 1024)
.user_message("Explain how neural networks work.")
.thinking(2000)
.temperature(0.7)
.build();
let response = client.create_anthropic_message(request).await?;
for content in &response.content {
match content {
openrouter_rust::anthropic::AnthropicResponseContent::Text { text } => {
println!("{}", text);
}
openrouter_rust::anthropic::AnthropicResponseContent::Thinking { thinking, .. } => {
println!("Thinking: {}", thinking);
}
_ => {}
}
}
println!("Tokens used: {}", response.usage.input_tokens + response.usage.output_tokens);

use openrouter_rust::{OpenRouterClient, embeddings::EmbeddingBuilder};
let client = OpenRouterClient::builder()
.api_key(std::env::var("OPENROUTER_API_KEY")?)
.build()?;
// Single text embedding
let request = EmbeddingBuilder::new("text-embedding-ada-002", "The quick brown fox")
.build();
let response = client.create_embedding(request).await?;
for item in &response.data {
match &item.embedding {
openrouter_rust::embeddings::EmbeddingData::FloatArray(vec) => {
println!("Embedding dimensions: {}", vec.len());
}
_ => {}
}
}
// Batch embeddings
let batch_request = EmbeddingBuilder::new_with_array(
"text-embedding-ada-002",
vec!["First text".to_string(), "Second text".to_string()]
).build();
let batch_response = client.create_embedding(batch_request).await?;
println!("Generated {} embeddings", batch_response.data.len());
// List available embedding models
let models = client.list_embedding_models().await?;
for model in &models.data {
println!("Model: {} - {}", model.id, model.description);
}

use openrouter_rust::{OpenRouterClient, responses::ResponsesRequestBuilder};
let client = OpenRouterClient::builder()
.api_key(std::env::var("OPENROUTER_API_KEY")?)
.build()?;
let request = ResponsesRequestBuilder::new("anthropic/claude-3.5-sonnet")
.user_message("Explain quantum computing in simple terms.")
.reasoning("medium")
.temperature(0.8)
.build();
let response = client.create_response(request).await?;
for item in &response.output {
match item {
openrouter_rust::responses::OutputItem::Message { content, .. } => {
for c in content {
if let openrouter_rust::responses::OutputContent::Text { text, .. } = c {
println!("{}", text);
}
}
}
openrouter_rust::responses::OutputItem::Reasoning { summary, .. } => {
println!("Reasoning summary available");
}
_ => {}
}
}

use openrouter_rust::{OpenRouterClient, models::ListModelsParams};
let client = OpenRouterClient::builder()
.api_key(std::env::var("OPENROUTER_API_KEY")?)
.build()?;
// List all models
let models = client.list_models(None).await?;
for model in &models.data {
println!("{} - {}", model.id, model.name);
println!(" Context length: {:?}", model.context_length);
println!(" Supports: {:?}", model.supported_parameters);
}
// Get model count
let count = client.get_models_count().await?;
println!("Total models available: {}", count.data.count);
// List models filtered by user preferences
let user_models = client.list_models_user().await?;
// Filter models by category
let params = ListModelsParams {
category: Some("programming".to_string()),
..Default::default()
};
let programming_models = client.list_models(Some(params)).await?;

use openrouter_rust::OpenRouterClient;
let client = OpenRouterClient::builder()
.api_key(std::env::var("OPENROUTER_API_KEY")?)
.build()?;
let providers = client.list_providers().await?;
for provider in &providers.data {
println!("Provider: {}", provider.name);
println!(" Slug: {}", provider.slug);
if let Some(ref privacy) = provider.privacy_policy_url {
println!(" Privacy Policy: {}", privacy);
}
}

use openrouter_rust::OpenRouterClient;
let client = OpenRouterClient::builder()
.api_key(std::env::var("OPENROUTER_API_KEY")?)
.build()?;
// After making a chat completion, get the generation ID from the response
let generation_id = "gen-1234567890abcdef";
let generation = client.get_generation(generation_id).await?;
let data = &generation.data;
println!("Generation ID: {}", data.id);
println!("Model: {}", data.model);
println!("Total cost: ${:.6}", data.total_cost);
println!("Tokens: {} prompt, {} completion",
data.tokens_prompt.unwrap_or(0.0),
data.tokens_completion.unwrap_or(0.0)
);
println!("Latency: {}ms", data.latency.unwrap_or(0.0));
println!("Provider: {:?}", data.provider_name);

use openrouter_rust::{OpenRouterClient, ChatCompletionBuilder};
let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
.system_message("You are a helpful assistant that only responds in JSON.")
.user_message("List 3 programming languages and their paradigms.")
.response_format_json()
.build();
let response = client.chat_completion(request).await?;
// Response will be valid JSON

use openrouter_rust::{
OpenRouterClient,
ChatCompletionBuilder,
types::{Tool, Function},
};
use serde_json::json;
let weather_tool = Tool {
tool_type: "function".to_string(),
function: Function {
name: "get_weather".to_string(),
description: Some("Get the current weather in a location".to_string()),
parameters: json!({
"type": "object",
"properties": {
"location": {
"type": "string",
"description": "The city and state"
}
},
"required": ["location"]
}),
},
};
let request = ChatCompletionBuilder::new("openai/gpt-3.5-turbo")
.user_message("What's the weather in Paris?")
.tools(vec![weather_tool])
.build();

let client = OpenRouterClient::builder()
.api_key("your-api-key") // Required
.base_url("https://openrouter.ai/api/v1") // Optional, defaults to official API
.http_referer("https://your-site.com") // Optional, for OpenRouter rankings
.x_title("Your App Name") // Optional, for OpenRouter rankings
.timeout(std::time::Duration::from_secs(60)) // Optional, default 60s
.build()?;

The library provides detailed error types:
use openrouter_rust::OpenRouterError;
match result {
Err(OpenRouterError::ApiError { code, message }) => {
println!("API error {}: {}", code, message);
}
Err(OpenRouterError::AuthenticationError(msg)) => {
println!("Auth error: {}", msg);
}
Err(OpenRouterError::RateLimitError) => {
println!("Rate limit exceeded, retry later");
}
Err(OpenRouterError::HttpError(e)) => {
println!("HTTP error: {}", e);
}
Err(OpenRouterError::JsonError(e)) => {
println!("JSON error: {}", e);
}
_ => {}
}

See the examples/ directory for complete working examples:
- basic_chat.rs - Simple chat completion
- streaming.rs - Real-time streaming
- responses_api.rs - Using the new Responses API
- anthropic_messages.rs - Anthropic Messages API usage
- embeddings.rs - Text and image embeddings
- models_api.rs - Listing and filtering models
- providers.rs - Listing providers
- generation_metadata.rs - Retrieving generation info
Run examples:
export OPENROUTER_API_KEY="your-api-key"
# Basic examples (chat features)
cargo run --example basic_chat
cargo run --example streaming --features streaming
cargo run --example anthropic_messages --features anthropic
# Advanced features
cargo run --example responses_api --features responses
cargo run --example embeddings --features embeddings
cargo run --example models_api --features models
cargo run --example providers --features providers
cargo run --example generation_metadata --features generations

Supported endpoints:
- ✅ Chat Completions (/v1/chat/completions)
- ✅ Responses API (/v1/responses)
- ✅ Anthropic Messages (/v1/messages)
- ✅ Embeddings (/v1/embeddings, /v1/embeddings/models)
- ✅ Models (/v1/models, /v1/models/count, /v1/models/user)
- ✅ Providers (/v1/providers)
- ✅ Generation Metadata (/v1/generation)
- ✅ Streaming (SSE)
- ✅ All standard parameters (temperature, top_p, max_tokens, etc.)
- ✅ Tools/Function calling
- ✅ JSON mode
- ✅ Provider preferences
- ✅ Plugins (web search, auto-router, etc.)
- 🔄 Image generation
- 🔄 Audio APIs (TTS, STT)
- 🔄 Batch processing
- 🔄 Fine-tuning
This project is licensed under the MIT OR Apache-2.0 license.
Contributions are welcome! Please feel free to submit a Pull Request.
Made with ❤️ for the Rust community