
Parallel agent call not working  #230

@neory9771

Description

#!/usr/bin/env python3
"""
Test script to check if langgraph-supervisor-py supports parallel execution
"""

from langchain_core.messages import HumanMessage
from langchain_openai import ChatOpenAI
from langgraph_supervisor import create_supervisor
from langgraph.prebuilt import create_react_agent
import time
import threading
import os
from dotenv import load_dotenv

# Load environment variables from .env file
load_dotenv()

# Create tools that log when they execute
execution_log = []
lock = threading.Lock()

def log_execution(message):
    # Record each event with a timestamp; the lock keeps the shared log
    # consistent if tool calls ever overlap on separate threads.
    with lock:
        execution_log.append(f"{time.time():.3f}: {message}")
    print(f"{time.time():.3f}: {message}")

def tool1():
    """Tool for agent 1"""
    log_execution("🔧 TOOL 1 STARTING")
    time.sleep(1)  # Simulate work
    log_execution("✅ TOOL 1 COMPLETED")
    return 'c'

def tool2(): 
    """Tool for agent 2"""
    log_execution("🔧 TOOL 2 STARTING")
    time.sleep(1)  # Simulate work  
    log_execution("✅ TOOL 2 COMPLETED")
    return 5

def main():
    print("🧪 Testing langgraph-supervisor-py parallel execution with real OpenAI...")
    
    # Use real OpenAI model
    model = ChatOpenAI(model="gpt-4.1", temperature=0)
    
    try:
        # Create agents
        agent1 = create_react_agent(
            model=model,
            prompt="used to generate sample character, call tool only once",
            tools=[tool1],
            name='agent1'
        )

        agent2 = create_react_agent(
            model=model,
            prompt="used to generate an int from 1 to 10, call tool only once",
            tools=[tool2],
            name='agent2'
        )

        # Create supervisor with a prompt that encourages parallel calls
        workflow = create_supervisor(
            [agent1, agent2],
            parallel_tool_calls=True,
            # parallel_tool_calls=False,
            model=model,
            prompt='You are a supervisor. call agents in parallel'
        )

        print("✅ Supervisor workflow created")
        
        # Compile the workflow
        app = workflow.compile()
        print("✅ Workflow compiled")
        
        # Test with a message that should trigger both agents
        print("\n🚀 Starting execution test...")
        start_time = time.time()
        
        result = app.invoke({
            "messages": [
                HumanMessage(content="generate sample character C using agent1. Generate a sample int N using agent 2. return a string of a char C repeated N times. example: char='c', N=2, return 'cc'")
            ]
        })

        print(f"{result=}")
        end_time = time.time()
        total_time = end_time - start_time
        
        print(f"\n📊 Execution Results:")
        print(f"Total execution time: {total_time:.3f}s")
        print(f"Execution log:")
        for entry in execution_log:
            print(f"  {entry}")
            
        print(f"\nFinal result type: {type(result)}")
        if 'messages' in result:
            print(f"Number of messages: {len(result['messages'])}")
            
    except Exception as e:
        print(f"❌ Error during test: {e}")
        import traceback
        traceback.print_exc()

if __name__ == "__main__":
    main()

The code above does not work as expected.
Expected behaviour: each agent is called exactly once and both agents run in parallel. Actual behaviour: the agents are never called in parallel, and the order in which they are called is unpredictable.

When the supervisor is instructed with 'You are a supervisor. Do not call agents in parallel', everything works fine.
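For comparison, below is a minimal sketch of the overlap I expected, written with plain LangGraph rather than langgraph-supervisor: two nodes fanned out from START run in the same superstep, so their 1-second sleeps overlap and the graph finishes in roughly 1 second instead of 2. The node names and the State shape are placeholders for illustration only.

import operator
import time
from typing import Annotated
from typing_extensions import TypedDict

from langgraph.graph import StateGraph, START, END


class State(TypedDict):
    # The operator.add reducer lets both branches append to the log
    # without overwriting each other's updates.
    log: Annotated[list, operator.add]


def node_a(state: State):
    time.sleep(1)  # simulate work
    return {"log": [f"{time.time():.3f}: node_a done"]}


def node_b(state: State):
    time.sleep(1)  # simulate work
    return {"log": [f"{time.time():.3f}: node_b done"]}


builder = StateGraph(State)
builder.add_node("node_a", node_a)
builder.add_node("node_b", node_b)
# Fan-out: both nodes are reachable directly from START,
# so they are scheduled in the same superstep.
builder.add_edge(START, "node_a")
builder.add_edge(START, "node_b")
builder.add_edge("node_a", END)
builder.add_edge("node_b", END)

graph = builder.compile()

start = time.time()
result = graph.invoke({"log": []})
print(result["log"])
print(f"total: {time.time() - start:.3f}s")  # ~1s if the nodes overlap, ~2s if sequential

With the supervisor script above, by contrast, the TOOL 1 / TOOL 2 log lines never overlap.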
