Build a Web Search Agent with Exa and OpenAI
This guide shows you how to build an agent that can search the web using Exa and synthesize the results using OpenAI. This is perfect for building research agents or bots that need real-time information.

Prerequisites

  • cycls installed
  • OpenAI API key
  • Exa API key (available from the Exa dashboard)
pip install cycls openai exa-py

Step 1: Define the Search Tool

First, we’ll define a Python function that uses the exa-py client to search the web.
def search_web(query):
    """Search the web for *query* via Exa and return an LLM-ready summary.

    Returns the top results as a single string of "Title / URL / Content"
    sections separated by blank lines.
    """
    from exa_py import Exa
    import os
    
    exa = Exa(api_key=os.getenv("EXA_API_KEY"))
    
    # Search and fetch the text content of the top 3 results
    response = exa.search_and_contents(
        query,
        num_results=3,
        text=True
    )
    
    # Format results for the LLM. Guard against missing fields: a result's
    # title or text can be None, and slicing None raises TypeError.
    results = []
    for r in response.results:
        title = r.title or "(untitled)"
        snippet = (r.text or "")[:1000]
        results.append(f"Title: {title}\nURL: {r.url}\nContent: {snippet}...")
        
    return "\n\n".join(results)

Step 2: Define the Tool Schema

Tell OpenAI about our search tool so it knows when to use it.
# JSON schema advertised to the model so it knows when and how to invoke
# the search_web tool: one required string argument named "query".
_query_param = {
    "type": "string",
    "description": "The search query",
}

tools = [
    {
        "type": "function",
        "name": "search_web",
        "description": "Search the web for real-time information.",
        "parameters": {
            "type": "object",
            "properties": {"query": _query_param},
            "required": ["query"],
        },
    }
]

Step 3: Initialize the Agent

Configure the agent with the necessary dependencies and load your environment variables.
import cycls
import os
import json
from openai import OpenAI

# Agent runtime configuration: install the Python dependencies and ship
# the .env file (which holds the API keys) into the deployment environment.
runtime_pip = ["openai", "exa-py", "python-dotenv"]
runtime_copy = [".env"]
agent = cycls.Agent(pip=runtime_pip, copy=runtime_copy)

@agent("search-agent", title="Research Assistant")
async def researcher(context):
    """Answer the user's request, searching the web via Exa when the model
    asks for it, then synthesizing the findings with a second model call.

    Yields the final answer text.
    """
    from openai import OpenAI
    from dotenv import load_dotenv
    load_dotenv()  # pull OPENAI_API_KEY / EXA_API_KEY from the copied .env
    
    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    
    # 1. Initial request: let the model decide whether to call the tool.
    response = client.responses.create(
        model="gpt-4o",
        input=context.messages,
        tools=tools
    )
    
    # Record the model output (which may contain function_call items) so the
    # follow-up request sees the full conversation.
    context.messages.extend(response.output)
    
    # 2. Handle tool execution. Every function_call item must be answered
    # with a matching function_call_output; leaving one dangling makes the
    # follow-up request invalid (the original code silently dropped calls
    # to unrecognized tool names).
    tool_called = False
    for item in response.output:
        if item.type != "function_call":
            continue
        tool_called = True
        if item.name == "search_web":
            # Execute the search
            args = json.loads(item.arguments)
            output = search_web(args["query"])
        else:
            # Unknown tool: answer the call with an error payload.
            output = f"Error: unknown tool '{item.name}'"
        # Add the tool result to the conversation
        context.messages.append({
            "type": "function_call_output",
            "call_id": item.call_id,
            "output": output
        })
    
    # 3. Final response: if a tool ran, ask the model to synthesize the
    # findings; otherwise the first response already contains the answer.
    if tool_called:
        final = client.responses.create(
            model="gpt-4o",
            input=context.messages
        )
        yield final.output_text
    else:
        yield response.output_text

# Start a local development server; set prod=True for a production deployment.
agent.deploy(prod=False)

Full Code

Create an agent.py file with the following content:
import cycls
import json
import os
from openai import OpenAI

# Initialize Agent
# Initialize Agent: install runtime dependencies and copy the .env file
# (which carries the API keys) into the deployment environment.
_agent_config = {
    "pip": ["openai", "exa-py", "python-dotenv"],
    "copy": [".env"],
}
agent = cycls.Agent(**_agent_config)

# Define Search Tool
# Define Search Tool
def search_web(query):
    """Search the web for *query* using Exa.

    Returns the top results formatted as "Title / URL / Content" sections
    separated by blank lines, ready to feed back to the LLM.
    """
    from exa_py import Exa
    # Ensure EXA_API_KEY is in your .env file
    exa = Exa(api_key=os.getenv("EXA_API_KEY"))
    
    response = exa.search_and_contents(
        query,
        num_results=3,
        text=True
    )
    
    # Guard against missing fields: a result's title or text can be None,
    # and slicing None raises TypeError.
    results = []
    for r in response.results:
        title = r.title or "(untitled)"
        snippet = (r.text or "")[:1000]
        results.append(f"Title: {title}\nURL: {r.url}\nContent: {snippet}...")
        
    return "\n\n".join(results)

# Tool Schema
# Tool Schema: tells the model that search_web takes a single required
# string argument named "query".
tools = [
    dict(
        type="function",
        name="search_web",
        description="Search the web for real-time information.",
        parameters=dict(
            type="object",
            properties=dict(
                query=dict(type="string", description="The search query"),
            ),
            required=["query"],
        ),
    )
]

@agent("search-agent", title="Research Assistant")
async def researcher(context):
    """Answer the user's request, calling the search_web tool when the
    model asks for it and synthesizing the results with a second call.

    Yields the final answer text.
    """
    from openai import OpenAI
    from dotenv import load_dotenv
    load_dotenv()  # load OPENAI_API_KEY / EXA_API_KEY from .env
    
    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    
    # 1. First Call to LLM — the model may answer directly or emit a tool call
    response = client.responses.create(
        model="gpt-4o",
        input=context.messages,
        tools=tools
    )
    
    # Update history with the model output (may include function_call items)
    context.messages.extend(response.output)
    
    # 2. Check for Function Calls. Every function_call must be paired with a
    # function_call_output, so unknown tool names get an error payload
    # instead of being silently dropped (which would leave a dangling call
    # and make the follow-up request invalid).
    tool_called = False
    for item in response.output:
        if item.type != "function_call":
            continue
        tool_called = True
        if item.name == "search_web":
            args = json.loads(item.arguments)
            result = search_web(args["query"])
        else:
            result = f"Error: unknown tool '{item.name}'"
        context.messages.append({
            "type": "function_call_output",
            "call_id": item.call_id,
            "output": result
        })
    
    # 3. Final Response — synthesize tool output, or pass the direct answer
    if tool_called:
        final = client.responses.create(
            model="gpt-4o",
            input=context.messages
        )
        yield final.output_text
    else:
        yield response.output_text

# Start a local development server; set prod=True for a production deployment.
agent.deploy(prod=False)

Configuration

Make sure your .env file contains both API keys:
OPENAI_API_KEY=sk-...
EXA_API_KEY=...