6 changes: 3 additions & 3 deletions langchain_mcp_adapters/client.py
sydney-runkle (Collaborator, Author):
linting fixes

@@ -1,7 +1,7 @@
"""Client for connecting to multiple MCP servers and loading LangChain tools/resources.
"""Client for connecting to multiple MCP servers and loading LC tools/resources.

This module provides the `MultiServerMCPClient` class for managing connections to multiple
MCP servers and loading tools, prompts, and resources from them.
This module provides the `MultiServerMCPClient` class for managing connections
to multiple MCP servers and loading tools, prompts, and resources from them.
"""

import asyncio
2 changes: 1 addition & 1 deletion langchain_mcp_adapters/interceptors.py
@@ -18,7 +18,7 @@

try:
# langgraph installed
import langgraph
import langgraph # noqa: F401

LANGGRAPH_PRESENT = True
except ImportError:
153 changes: 123 additions & 30 deletions langchain_mcp_adapters/tools.py
@@ -5,9 +5,17 @@
"""

from collections.abc import Awaitable, Callable
from typing import Any, get_args
from typing import Any, TypedDict, get_args

from langchain_core.messages import ToolMessage
from langchain_core.messages.content import (
FileContentBlock,
ImageContentBlock,
TextContentBlock,
create_file_block,
create_image_block,
create_text_block,
)
from langchain_core.tools import (
BaseTool,
InjectedToolArg,
@@ -20,10 +28,13 @@
from mcp.server.fastmcp.utilities.func_metadata import ArgModelBase, FuncMetadata
from mcp.types import (
AudioContent,
BlobResourceContents,
ContentBlock,
EmbeddedResource,
ImageContent,
ResourceLink,
TextContent,
TextResourceContents,
)
from mcp.types import Tool as MCPTool
from pydantic import BaseModel, create_model
@@ -38,42 +49,118 @@

try:
# langgraph installed
import langgraph
from langgraph.types import Command

LANGGRAPH_PRESENT = True
except ImportError:
LANGGRAPH_PRESENT = False

NonTextContent = ImageContent | AudioContent | ResourceLink | EmbeddedResource
# Type alias for LangChain content blocks used in ToolMessage
ToolMessageContentBlock = TextContentBlock | ImageContentBlock | FileContentBlock

# Conditional type based on langgraph availability
if LANGGRAPH_PRESENT:
ConvertedToolResult = str | list[str] | ToolMessage | Command
ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage | Command
else:
ConvertedToolResult = str | list[str] | ToolMessage
ConvertedToolResult = list[ToolMessageContentBlock] | ToolMessage

MAX_ITERATIONS = 1000


class MCPToolArtifact(TypedDict):
"""Artifact returned from MCP tool calls.

This TypedDict wraps the structured content from MCP tool calls,
allowing for future extension if MCP adds more fields to tool results.

Attributes:
structured_content: The structured content returned by the MCP tool,
corresponding to the structuredContent field in CallToolResult.
"""

structured_content: dict[str, Any]


def _convert_mcp_content_to_lc_block( # noqa: PLR0911
content: ContentBlock,
) -> ToolMessageContentBlock:
"""Convert any MCP content block to a LangChain content block.

Args:
content: MCP content object (TextContent, ImageContent, AudioContent,
ResourceLink, or EmbeddedResource).

Returns:
LangChain content block dict.

Raises:
NotImplementedError: If AudioContent is passed.
ValueError: If an unknown content type is passed.
"""
if isinstance(content, TextContent):
return create_text_block(text=content.text)

if isinstance(content, ImageContent):
return create_image_block(base64=content.data, mime_type=content.mimeType)

if isinstance(content, AudioContent):
msg = (
"AudioContent conversion to LangChain content blocks is not yet "
f"supported. Received audio with mime type: {content.mimeType}"
)
raise NotImplementedError(msg)

if isinstance(content, ResourceLink):
mime_type = content.mimeType or None
if mime_type and mime_type.startswith("image/"):
return create_image_block(url=str(content.uri), mime_type=mime_type)
return create_file_block(url=str(content.uri), mime_type=mime_type)

if isinstance(content, EmbeddedResource):
resource = content.resource
if isinstance(resource, TextResourceContents):
return create_text_block(text=resource.text)
if isinstance(resource, BlobResourceContents):
mime_type = resource.mimeType or None
if mime_type and mime_type.startswith("image/"):
return create_image_block(base64=resource.blob, mime_type=mime_type)
return create_file_block(base64=resource.blob, mime_type=mime_type)
msg = f"Unknown embedded resource type: {type(resource).__name__}"
raise ValueError(msg)

msg = f"Unknown MCP content type: {type(content).__name__}"
raise ValueError(msg)


def _convert_call_tool_result(
call_tool_result: MCPToolCallResult,
) -> tuple[ConvertedToolResult, list[NonTextContent] | None]:
) -> tuple[ConvertedToolResult, MCPToolArtifact | None]:
"""Convert MCP MCPToolCallResult to LangChain tool result format.

Converts MCP content blocks to LangChain content blocks:
- TextContent -> {"type": "text", "text": ...}
- ImageContent -> {"type": "image", "base64": ..., "mime_type": ...}
- ResourceLink (image/*) -> {"type": "image", "url": ..., "mime_type": ...}
- ResourceLink (other) -> {"type": "file", "url": ..., "mime_type": ...}
- EmbeddedResource (text) -> {"type": "text", "text": ...}
- EmbeddedResource (blob) -> {"type": "image", ...} or {"type": "file", ...}
- AudioContent -> raises NotImplementedError

Args:
call_tool_result: The result from calling an MCP tool. Can be either
a CallToolResult (MCP format), a ToolMessage (LangChain format),
or a Command (LangGraph format, if langgraph is installed).

Returns:
A tuple containing the text content (which may be a ToolMessage or Command)
and any non-text content. When a ToolMessage or Command is returned by an
interceptor, it's placed in the first position of the tuple as the content,
with None as the artifact.
A tuple containing:
- The content: either a string (single text), list of content blocks,
ToolMessage, or Command
- The artifact: MCPToolArtifact with structured_content if present,
otherwise None

Raises:
ToolException: If the tool call resulted in an error.
NotImplementedError: If AudioContent is encountered.
"""
# If the interceptor returned a ToolMessage directly, return it as the content
# with None as the artifact to match the content_and_artifact format
@@ -84,25 +171,31 @@ def _convert_call_tool_result(
if LANGGRAPH_PRESENT and isinstance(call_tool_result, Command):
return call_tool_result, None

# Otherwise, convert from CallToolResult
text_contents: list[TextContent] = []
non_text_contents = []
for content in call_tool_result.content:
if isinstance(content, TextContent):
text_contents.append(content)
else:
non_text_contents.append(content)

tool_content: str | list[str] = [content.text for content in text_contents]
if not text_contents:
tool_content = ""
elif len(text_contents) == 1:
tool_content = tool_content[0]
Comment on lines -87 to -100
sydney-runkle (Collaborator, Author):
This is the big question: can we remove this funky str concat logic?

I think it'd be more consistent if we always populated tool messages with standard content blocks, and we're not on 1.0 yet, so we do have some room to make breaking changes.

On the other hand, the value here isn't that high, so I'm OK with leaving this as is, if desired, to avoid pain for users.

cc @eyurtsev

Collaborator:
Do we expect this would break stuff? Would .text / .content continue working on tool messages as before? (with the change being only on content_blocks?)

sydney-runkle (Collaborator, Author), Dec 2, 2025:
This would break .content. So it's probably not worth the change.

BUT we are on 0.0.X here, so we have the ability to make this change if we want.
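
For reference, a minimal sketch of the shape change under discussion, drawn from the updated tests in this PR (the `add` tool comes from the test fixtures; the auto-generated block `id` is elided):

```python
# Before this change: a single TextContent result collapsed to a bare string.
result = await add_tool.ainvoke({"a": 2, "b": 3})
assert result == "5"

# After this change: results are always a list of standard LangChain content
# blocks, so ToolMessage.content carries blocks rather than a plain string.
result = await add_tool.ainvoke({"a": 2, "b": 3})
assert result == [{"type": "text", "text": "5", "id": "..."}]  # id is auto-generated
```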

# Convert all MCP content blocks to LangChain content blocks
tool_content: list[ToolMessageContentBlock] = [
_convert_mcp_content_to_lc_block(content)
for content in call_tool_result.content
]

if call_tool_result.isError:
raise ToolException(tool_content)
# Join text from all blocks
error_parts = []
for item in tool_content:
if isinstance(item, str):
error_parts.append(item)
elif isinstance(item, dict) and item.get("type") == "text":
error_parts.append(item.get("text", ""))
error_msg = "\n".join(error_parts) if error_parts else str(tool_content)
raise ToolException(error_msg)

# Extract structured content and wrap in MCPToolArtifact
artifact: MCPToolArtifact | None = None
if call_tool_result.structuredContent is not None:
artifact = MCPToolArtifact(
structured_content=call_tool_result.structuredContent
)

return tool_content, non_text_contents or None
return tool_content, artifact


def _build_interceptor_chain(
@@ -209,17 +302,17 @@ def convert_mcp_tool_to_langchain_tool(
async def call_tool(
runtime: Any = None, # noqa: ANN401
**arguments: dict[str, Any],
) -> tuple[ConvertedToolResult, list[NonTextContent] | None]:
) -> tuple[ConvertedToolResult, MCPToolArtifact | None]:
"""Execute tool call with interceptor chain and return formatted result.

Args:
runtime: LangGraph tool runtime if available, otherwise None.
**arguments: Tool arguments as keyword args.

Returns:
A tuple of (text_content, non_text_content), where text_content may be
a ToolMessage or Command (if langgraph is installed) if an interceptor
returned one directly.
A tuple of (content, artifact) where:
- content: string, list of strings/content blocks, ToolMessage, or Command
- artifact: MCPToolArtifact with structured_content if present, else None
"""
mcp_callbacks = (
callbacks.to_mcp_format(
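For orientation, a rough sketch of how the new (content, artifact) pair surfaces to callers when a server sets structuredContent, assuming a hypothetical weather tool whose server returns structured content alongside a text block (the structured values shown are illustrative, not taken from this PR):

```python
# Invoking with a full ToolCall dict returns a ToolMessage
# (the adapted tools use the content_and_artifact response format).
tool_msg = await weather_tool.ainvoke(
    {"args": {"location": "London"}, "id": "1", "type": "tool_call"}
)

# content: LangChain content blocks converted from the MCP content blocks, e.g.
#   [{"type": "text", "text": "It's always sunny in London", "id": "..."}]
print(tool_msg.content)

# artifact: the server's structuredContent wrapped in an MCPToolArtifact,
# or None when no structured content was returned, e.g.
#   {"structured_content": {"location": "London", "conditions": "sunny"}}
print(tool_msg.artifact)
```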
1 change: 1 addition & 0 deletions pyproject.toml
@@ -31,6 +31,7 @@ test = [
"types-setuptools>=69.0.0",
"websockets>=15.0.1",
"pytest-timeout>=2.4.0",
"dirty-equals>=0.9.0",
]

[project.urls]
6 changes: 5 additions & 1 deletion tests/test_callbacks.py
Expand Up @@ -118,7 +118,11 @@ async def logging_callback(params, context):
result = await tool.ainvoke(
{"args": {"task": "test"}, "id": "1", "type": "tool_call"}
)
assert "Executed: test" in result.content
assert any(
"Executed: test" in block.get("text", "")
for block in result.content
if isinstance(block, dict)
)

# Verify both progress and logging callbacks were called
await asyncio.sleep(0.05) # Give time for callbacks to complete
17 changes: 11 additions & 6 deletions tests/test_client.py
@@ -6,6 +6,7 @@

from langchain_mcp_adapters.client import MultiServerMCPClient
from langchain_mcp_adapters.tools import load_mcp_tools
from tests.utils import IsLangChainID


async def test_multi_server_mcp_client(
@@ -70,22 +71,24 @@ async def test_multi_server_mcp_client(
# Test that we can call a math tool
add_tool = next(tool for tool in all_tools if tool.name == "add")
result = await add_tool.ainvoke({"a": 2, "b": 3})
assert result == "5"
assert result == [{"type": "text", "text": "5", "id": IsLangChainID}]

# Test that we can call a weather tool
weather_tool = next(tool for tool in all_tools if tool.name == "get_weather")
result = await weather_tool.ainvoke({"location": "London"})
assert result == "It's always sunny in London"
assert result == [
{"type": "text", "text": "It's always sunny in London", "id": IsLangChainID}
]

# Test the multiply tool
multiply_tool = next(tool for tool in all_tools if tool.name == "multiply")
result = await multiply_tool.ainvoke({"a": 4, "b": 5})
assert result == "20"
assert result == [{"type": "text", "text": "20", "id": IsLangChainID}]

# Test that we can call a time tool
time_tool = next(tool for tool in all_tools if tool.name == "get_time")
result = await time_tool.ainvoke({"args": ""})
assert result == "5:20:00 PM EST"
assert result == [{"type": "text", "text": "5:20:00 PM EST", "id": IsLangChainID}]


async def test_multi_server_connect_methods(
@@ -117,7 +120,7 @@ async def test_multi_server_connect_methods(
tools = await load_mcp_tools(session)
assert len(tools) == 2
result = await tools[0].ainvoke({"a": 2, "b": 3})
assert result == "5"
assert result == [{"type": "text", "text": "5", "id": IsLangChainID}]

for tool in tools:
tool_names.add(tool.name)
@@ -126,7 +129,9 @@
tools = await load_mcp_tools(session)
assert len(tools) == 1
result = await tools[0].ainvoke({"args": ""})
assert result == "5:20:00 PM EST"
assert result == [
{"type": "text", "text": "5:20:00 PM EST", "id": IsLangChainID}
]

for tool in tools:
tool_names.add(tool.name)