Skip to content

Commit 43a302f

Browse files
Copilot and stephentoub authored
Align sampling specification XML docs with spec revision d165cd6 (#1293)
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: stephentoub <2642209+stephentoub@users.noreply.github.com>
1 parent 49fbc51 commit 43a302f

File tree

2 files changed

+26
-1
lines changed

2 files changed

+26
-1
lines changed

src/ModelContextProtocol.Core/Protocol/CreateMessageRequestParams.cs

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -33,25 +33,38 @@ public sealed class CreateMessageRequestParams : RequestParams
3333
/// Gets or sets the maximum number of tokens to generate in the LLM response, as requested by the server.
3434
/// </summary>
3535
/// <remarks>
36+
/// <para>
3637
/// A token is generally a word or part of a word in the text. Setting this value helps control
3738
/// response length and computation time. The client can choose to sample fewer tokens than requested.
39+
/// </para>
40+
/// <para>
41+
/// The client must respect the <see cref="MaxTokens"/> parameter.
42+
/// </para>
3843
/// </remarks>
3944
[JsonPropertyName("maxTokens")]
4045
public required int MaxTokens { get; set; }
4146

4247
/// <summary>
4348
/// Gets or sets the messages requested by the server to be included in the prompt.
4449
/// </summary>
50+
/// <remarks>
51+
/// The list of messages in a sampling request should not be retained between separate requests.
52+
/// </remarks>
4553
[JsonPropertyName("messages")]
4654
public IList<SamplingMessage> Messages { get; set; } = [];
4755

4856
/// <summary>
4957
/// Gets or sets optional metadata to pass through to the LLM provider.
5058
/// </summary>
5159
/// <remarks>
60+
/// <para>
5261
/// The format of this metadata is provider-specific and can include model-specific settings or
5362
/// configuration that isn't covered by standard parameters. This allows for passing custom parameters
5463
/// that are specific to certain AI models or providers.
64+
/// </para>
65+
/// <para>
66+
/// The client may modify or ignore metadata.
67+
/// </para>
5568
/// </remarks>
5669
[JsonPropertyName("metadata")]
5770
public JsonElement? Metadata { get; set; }
@@ -90,6 +103,9 @@ public sealed class CreateMessageRequestParams : RequestParams
90103
/// sequence exactly matches one of the provided sequences. Common uses include ending markers like "END", punctuation
91104
/// like ".", or special delimiter sequences like "###".
92105
/// </para>
106+
/// <para>
107+
/// The client may modify or ignore stop sequences.
108+
/// </para>
93109
/// </remarks>
94110
[JsonPropertyName("stopSequences")]
95111
public IList<string>? StopSequences { get; set; }
@@ -106,6 +122,15 @@ public sealed class CreateMessageRequestParams : RequestParams
106122
/// <summary>
107123
/// Gets or sets the temperature to use for sampling, as requested by the server.
108124
/// </summary>
125+
/// <remarks>
126+
/// <para>
127+
/// Temperature controls randomness in model responses. Higher values produce higher randomness,
128+
/// and lower values produce more stable output. The valid range depends on the model provider.
129+
/// </para>
130+
/// <para>
131+
/// The client may modify or ignore this value.
132+
/// </para>
133+
/// </remarks>
109134
[JsonPropertyName("temperature")]
110135
public float? Temperature { get; set; }
111136

src/ModelContextProtocol.Core/Protocol/CreateMessageResult.cs

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -41,7 +41,7 @@ public sealed class CreateMessageResult : Result
4141
/// <remarks>
4242
/// Standard values include:
4343
/// <list type="bullet">
44-
/// <item><term>endTurn</term><description>The model naturally completed its response.</description></item>
44+
/// <item><term>endTurn</term><description>The participant is yielding the conversation to the other party.</description></item>
4545
/// <item><term>maxTokens</term><description>The response was truncated due to reaching token limits.</description></item>
4646
/// <item><term>stopSequence</term><description>A specific stop sequence was encountered during generation.</description></item>
4747
/// <item><term>toolUse</term><description>The model wants to use one or more tools.</description></item>

0 commit comments

Comments (0)