Skip to content

Commit c8073f5

Browse files
committed
[app-server] model list API
1 parent ab95eaa commit c8073f5

File tree

8 files changed

+360
-1
lines changed

8 files changed

+360
-1
lines changed

codex-rs/app-server-protocol/src/protocol.rs

Lines changed: 58 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -106,6 +106,13 @@ client_request_definitions! {
106106
params: ListConversationsParams,
107107
response: ListConversationsResponse,
108108
},
109+
#[serde(rename = "model/list")]
110+
#[ts(rename = "model/list")]
111+
/// List available Codex models along with display metadata.
112+
ListModels {
113+
params: ListModelsParams,
114+
response: ListModelsResponse,
115+
},
109116
/// Resume a recorded Codex conversation from a rollout file.
110117
ResumeConversation {
111118
params: ResumeConversationParams,
@@ -308,6 +315,40 @@ pub struct ListConversationsResponse {
308315
pub next_cursor: Option<String>,
309316
}
310317

318+
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default, JsonSchema, TS)]
319+
#[serde(rename_all = "camelCase")]
320+
pub struct ListModelsParams {
321+
/// Optional page size; if omitted, all models are returned in one page.
322+
#[serde(skip_serializing_if = "Option::is_none")]
323+
pub page_size: Option<usize>,
324+
/// Opaque pagination cursor returned by a previous call.
325+
#[serde(skip_serializing_if = "Option::is_none")]
326+
pub cursor: Option<String>,
327+
}
328+
329+
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
330+
#[serde(rename_all = "camelCase")]
331+
pub struct Model {
332+
pub id: String,
333+
pub slug: String,
334+
pub display_name: String,
335+
pub description: String,
336+
pub supported_reasoning_efforts: Vec<ReasoningEffort>,
337+
pub default_reasoning_effort: ReasoningEffort,
338+
// Only one model should be marked as default.
339+
pub is_default: bool,
340+
}
341+
342+
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
343+
#[serde(rename_all = "camelCase")]
344+
pub struct ListModelsResponse {
345+
pub items: Vec<Model>,
346+
/// Opaque cursor to pass to the next call to continue after the last item.
347+
/// If `None`, there are no more items to return.
348+
#[serde(skip_serializing_if = "Option::is_none")]
349+
pub next_cursor: Option<String>,
350+
}
351+
311352
#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)]
312353
#[serde(rename_all = "camelCase")]
313354
pub struct ResumeConversationParams {
@@ -999,4 +1040,21 @@ mod tests {
9991040
);
10001041
Ok(())
10011042
}
1043+
1044+
#[test]
1045+
fn serialize_list_models() -> Result<()> {
1046+
let request = ClientRequest::ListModels {
1047+
request_id: RequestId::Integer(2),
1048+
params: ListModelsParams::default(),
1049+
};
1050+
assert_eq!(
1051+
json!({
1052+
"method": "model/list",
1053+
"id": 2,
1054+
"params": {}
1055+
}),
1056+
serde_json::to_value(&request)?,
1057+
);
1058+
Ok(())
1059+
}
10021060
}

codex-rs/app-server/src/codex_message_processor.rs

Lines changed: 58 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
use crate::error_code::INTERNAL_ERROR_CODE;
22
use crate::error_code::INVALID_REQUEST_ERROR_CODE;
33
use crate::fuzzy_file_search::run_fuzzy_file_search;
4+
use crate::models::codex_models;
45
use crate::outgoing_message::OutgoingMessageSender;
56
use crate::outgoing_message::OutgoingNotification;
67
use codex_app_server_protocol::AddConversationListenerParams;
@@ -29,6 +30,8 @@ use codex_app_server_protocol::InterruptConversationResponse;
2930
use codex_app_server_protocol::JSONRPCErrorError;
3031
use codex_app_server_protocol::ListConversationsParams;
3132
use codex_app_server_protocol::ListConversationsResponse;
33+
use codex_app_server_protocol::ListModelsParams;
34+
use codex_app_server_protocol::ListModelsResponse;
3235
use codex_app_server_protocol::LoginApiKeyParams;
3336
use codex_app_server_protocol::LoginApiKeyResponse;
3437
use codex_app_server_protocol::LoginChatGptCompleteNotification;
@@ -111,7 +114,6 @@ use uuid::Uuid;
111114

112115
// Duration before a ChatGPT login attempt is abandoned.
113116
const LOGIN_CHATGPT_TIMEOUT: Duration = Duration::from_secs(10 * 60);
114-
115117
struct ActiveLogin {
116118
shutdown_handle: ShutdownHandle,
117119
login_id: Uuid,
@@ -172,6 +174,9 @@ impl CodexMessageProcessor {
172174
ClientRequest::ListConversations { request_id, params } => {
173175
self.handle_list_conversations(request_id, params).await;
174176
}
177+
ClientRequest::ListModels { request_id, params } => {
178+
self.list_models(request_id, params).await;
179+
}
175180
ClientRequest::ResumeConversation { request_id, params } => {
176181
self.handle_resume_conversation(request_id, params).await;
177182
}
@@ -831,6 +836,58 @@ impl CodexMessageProcessor {
831836
self.outgoing.send_response(request_id, response).await;
832837
}
833838

839+
async fn list_models(&self, request_id: RequestId, params: ListModelsParams) {
840+
let ListModelsParams { page_size, cursor } = params;
841+
let models = codex_models();
842+
let total = models.len();
843+
844+
if total == 0 {
845+
let response = ListModelsResponse {
846+
items: Vec::new(),
847+
next_cursor: None,
848+
};
849+
self.outgoing.send_response(request_id, response).await;
850+
return;
851+
}
852+
853+
let effective_page_size = page_size.unwrap_or(total).max(1).min(total);
854+
let start = match cursor {
855+
Some(cursor) => match cursor.parse::<usize>() {
856+
Ok(idx) => idx,
857+
Err(_) => {
858+
let error = JSONRPCErrorError {
859+
code: INVALID_REQUEST_ERROR_CODE,
860+
message: format!("invalid cursor: {cursor}"),
861+
data: None,
862+
};
863+
self.outgoing.send_error(request_id, error).await;
864+
return;
865+
}
866+
},
867+
None => 0,
868+
};
869+
870+
if start > total {
871+
let error = JSONRPCErrorError {
872+
code: INVALID_REQUEST_ERROR_CODE,
873+
message: format!("cursor {start} exceeds total models {total}"),
874+
data: None,
875+
};
876+
self.outgoing.send_error(request_id, error).await;
877+
return;
878+
}
879+
880+
let end = start.saturating_add(effective_page_size).min(total);
881+
let items = models[start..end].to_vec();
882+
let next_cursor = if end < total {
883+
Some(end.to_string())
884+
} else {
885+
None
886+
};
887+
let response = ListModelsResponse { items, next_cursor };
888+
self.outgoing.send_response(request_id, response).await;
889+
}
890+
834891
async fn handle_resume_conversation(
835892
&self,
836893
request_id: RequestId,

codex-rs/app-server/src/lib.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ mod codex_message_processor;
2727
mod error_code;
2828
mod fuzzy_file_search;
2929
mod message_processor;
30+
mod models;
3031
mod outgoing_message;
3132

3233
/// Size of the bounded channels used to communicate between tasks. The value

codex-rs/app-server/src/models.rs

Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
use codex_app_server_protocol::Model;
2+
use codex_protocol::config_types::ReasoningEffort;
3+
4+
const DEFAULT_MODEL_SLUG: &str = "gpt-5-codex";
5+
pub const DEFAULT_REASONING_EFFORT: ReasoningEffort = ReasoningEffort::Medium;
6+
7+
pub fn codex_models() -> Vec<Model> {
8+
vec![
9+
Model {
10+
id: DEFAULT_MODEL_SLUG.to_string(),
11+
slug: DEFAULT_MODEL_SLUG.to_string(),
12+
display_name: "GPT-5 Codex".to_string(),
13+
description: "Specialized GPT-5 variant optimized for Codex.".to_string(),
14+
supported_reasoning_efforts: vec![
15+
ReasoningEffort::Low,
16+
ReasoningEffort::Medium,
17+
ReasoningEffort::High,
18+
],
19+
default_reasoning_effort: DEFAULT_REASONING_EFFORT,
20+
is_default: true,
21+
},
22+
Model {
23+
id: "gpt-5".to_string(),
24+
slug: "gpt-5".to_string(),
25+
display_name: "GPT-5".to_string(),
26+
description: "General-purpose GPT-5 model.".to_string(),
27+
supported_reasoning_efforts: vec![
28+
ReasoningEffort::Minimal,
29+
ReasoningEffort::Low,
30+
ReasoningEffort::Medium,
31+
ReasoningEffort::High,
32+
],
33+
default_reasoning_effort: DEFAULT_REASONING_EFFORT,
34+
is_default: false,
35+
},
36+
]
37+
}

codex-rs/app-server/tests/common/mcp_process.rs

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@ use codex_app_server_protocol::GetAuthStatusParams;
2121
use codex_app_server_protocol::InitializeParams;
2222
use codex_app_server_protocol::InterruptConversationParams;
2323
use codex_app_server_protocol::ListConversationsParams;
24+
use codex_app_server_protocol::ListModelsParams;
2425
use codex_app_server_protocol::LoginApiKeyParams;
2526
use codex_app_server_protocol::NewConversationParams;
2627
use codex_app_server_protocol::RemoveConversationListenerParams;
@@ -264,6 +265,15 @@ impl McpProcess {
264265
self.send_request("listConversations", params).await
265266
}
266267

268+
/// Send a `model/list` JSON-RPC request.
269+
pub async fn send_list_models_request(
270+
&mut self,
271+
params: ListModelsParams,
272+
) -> anyhow::Result<i64> {
273+
let params = Some(serde_json::to_value(params)?);
274+
self.send_request("model/list", params).await
275+
}
276+
267277
/// Send a `resumeConversation` JSON-RPC request.
268278
pub async fn send_resume_conversation_request(
269279
&mut self,

codex-rs/app-server/tests/suite/mod.rs

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ mod fuzzy_file_search;
77
mod interrupt;
88
mod list_resume;
99
mod login;
10+
mod model_list;
1011
mod rate_limits;
1112
mod send_message;
1213
mod set_default_model;

0 commit comments

Comments
 (0)