Define the built-in, rule-based MultimodalRouter, which routes text-only requests to a secondary LLM and multimodal requests (those containing images) to the primary, multimodal-capable LLM:
Automatically route requests to different LLMs based on task characteristics to optimize cost and performance:
examples/01_standalone_sdk/19_llm_routing.py
Copy
Ask AI
"""Example: route requests between two LLMs with the built-in MultimodalRouter.

Text-only messages are served by the cheaper secondary LLM; messages that
contain images are routed to the multimodal-capable primary LLM. Requires the
LLM_API_KEY environment variable (and optionally LLM_MODEL / LLM_BASE_URL).
"""

import os

from pydantic import SecretStr

from openhands.sdk import (
    LLM,
    Agent,
    Conversation,
    Event,
    ImageContent,
    LLMConvertibleEvent,
    Message,
    TextContent,
    get_logger,
)
from openhands.sdk.llm.router import MultimodalRouter
from openhands.tools.preset.default import get_default_tools

logger = get_logger(__name__)

# --- Configure LLMs ---------------------------------------------------------
api_key = os.getenv("LLM_API_KEY")
assert api_key is not None, "LLM_API_KEY environment variable is not set."
# Model names follow the LiteLLM convention: provider/model_name.
model = os.getenv("LLM_MODEL", "openhands/claude-sonnet-4-5-20250929")
base_url = os.getenv("LLM_BASE_URL")

# Primary: multimodal-capable model that handles image-bearing requests.
primary_llm = LLM(
    usage_id="agent-primary",
    model=model,
    base_url=base_url,
    api_key=SecretStr(api_key),
)
# Secondary: cheaper model that handles text-only requests.
secondary_llm = LLM(
    usage_id="agent-secondary",
    model="openhands/devstral-small-2507",
    base_url=base_url,
    api_key=SecretStr(api_key),
)

# Rule-based router: picks "primary" when the request contains images,
# "secondary" otherwise.
multimodal_router = MultimodalRouter(
    usage_id="multimodal-router",
    llms_for_routing={"primary": primary_llm, "secondary": secondary_llm},
)

# --- Tools ------------------------------------------------------------------
tools = get_default_tools()  # Use our default openhands experience

# --- Agent ------------------------------------------------------------------
agent = Agent(llm=multimodal_router, tools=tools)

llm_messages = []  # collect raw LLM messages


def conversation_callback(event: Event) -> None:
    """Accumulate every LLM-convertible event as a raw LLM message."""
    if isinstance(event, LLMConvertibleEvent):
        llm_messages.append(event.to_llm_message())


conversation = Conversation(
    agent=agent, callbacks=[conversation_callback], workspace=os.getcwd()
)

# 1) Text-only request -> routed to the secondary LLM.
conversation.send_message(
    message=Message(
        role="user",
        content=[TextContent(text="Hi there, who trained you?")],
    )
)
conversation.run()

# 2) Image-bearing request -> routed to the primary (multimodal) LLM.
conversation.send_message(
    message=Message(
        role="user",
        content=[
            ImageContent(
                image_urls=["http://images.cocodataset.org/val2017/000000039769.jpg"]
            ),
            TextContent(text="What do you see in the image above?"),
        ],
    )
)
conversation.run()

# 3) Text-only again -> back to the secondary LLM.
conversation.send_message(
    message=Message(
        role="user",
        content=[TextContent(text="Who trained you as an LLM?")],
    )
)
conversation.run()

print("=" * 100)
print("Conversation finished. Got the following LLM messages:")
for i, message in enumerate(llm_messages):
    print(f"Message {i}: {str(message)[:200]}")

# Report cost accumulated across both LLMs.
cost = conversation.conversation_stats.get_combined_metrics().accumulated_cost
print(f"EXAMPLE_COST: {cost}")
You can run the example code as-is.
The model name should follow the LiteLLM convention: provider/model_name (e.g., anthropic/claude-sonnet-4-5-20250929, openai/gpt-4o).
The LLM_API_KEY should be the API key for your chosen provider.
ChatGPT Plus/Pro subscribers: You can use LLM.subscription_login() to authenticate with your ChatGPT account and access Codex models without consuming API credits. See the LLM Subscriptions guide for details.