import cycls
import os
from dotenv import load_dotenv
# Load environment variables from .env so the os.getenv() calls below resolve.
load_dotenv()
# Initialize the Cycls agent with its API key and deployment dependencies.
agent = cycls.Agent(
    key=os.getenv("CYCLS_API_KEY"),  # None if CYCLS_API_KEY is unset — presumably rejected by cycls; confirm
    pip=["openai", "python-dotenv"],  # packages installed into the deployed agent's environment
    copy=[".env"]  # ship the .env file with the deployment so the secrets above resolve remotely
)
# Helper that forwards a chat history to OpenAI and hands back a token stream.
async def llm(messages):
    """Start a streaming chat completion and return an async generator of text chunks.

    `messages` is the chat history in OpenAI message-dict format; the returned
    async generator yields each non-empty content delta as it arrives.
    """
    # Imported here so the dependency is resolved inside the deployed sandbox,
    # where the `pip` packages declared on the agent are installed.
    import openai

    # The key comes from the .env file loaded at module import time.
    client = openai.AsyncOpenAI(api_key=os.getenv("OPENAI_API_KEY"))
    stream = await client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        stream=True
    )

    async def _token_stream():
        # Relay every non-empty content delta from the streaming response.
        async for part in stream:
            if text := part.choices[0].delta.content:
                yield text

    return _token_stream()
# Register this handler with Cycls under the name "cake".
@agent("cake", title="My AI Agent", auth=True)
async def cake_agent(context):
    """Entry point for the agent: stream an LLM reply to the chat history.

    `context.messages` carries the conversation so far; the awaited helper
    returns an async generator of response chunks for Cycls to stream out.
    """
    history = context.messages
    return await llm(history)
# Deploy on import/run; prod=True performs the real (production) deployment.
# NOTE(review): this runs unconditionally at module level — confirm cycls
# expects the script to deploy on plain import rather than under a __main__ guard.
agent.deploy(prod=True)