import json

from smallestai.atoms.agent.nodes import OutputAgentNode
from smallestai.atoms.agent.clients.openai import OpenAIClient
from smallestai.atoms.agent.tools import ToolRegistry, function_tool
from smallestai.atoms.agent.events import SDKAgentEndCallEvent

class SupportAgent(OutputAgentNode):
    def __init__(self):
        super().__init__(name="support-agent")
        self.llm = OpenAIClient(model="gpt-4o-mini")

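        # Collect the @function_tool-decorated methods defined on this class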
        self.tool_registry = ToolRegistry()
        self.tool_registry.discover(self)

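        # Seed the conversation with the agent's instructions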
        self.context.add_message({
            "role": "system",
            "content": "You are a support agent. Help users with their questions. "
                       "Use end_call when they're done."
        })

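    # Tools are plain methods; the docstring and type hints describe each tool to the model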
    @function_tool()
    def get_order_status(self, order_id: str):
        """Look up an order's status.

        Args:
            order_id: Order ID like "ORD-12345"
        """
        # Your implementation goes here; this stub returns canned data
        return {"status": "shipped", "eta": "Tomorrow"}

    @function_tool()
    async def end_call(self):
        """End the call when the user says goodbye."""
        await self.send_event(SDKAgentEndCallEvent())
        return None

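    # Stream the assistant's reply, executing any tool calls the model requests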
    async def generate_response(self):
        response = await self.llm.chat(
            messages=self.context.messages,
            stream=True,
            tools=self.tool_registry.get_schemas()
        )

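        # Yield text deltas as they arrive; accumulate any tool calls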
        tool_calls = []
        async for chunk in response:
            if chunk.content:
                yield chunk.content
            if chunk.tool_calls:
                tool_calls.extend(chunk.tool_calls)

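        # Run the requested tools in parallel and record each call/result pair in the context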
        if tool_calls:
            results = await self.tool_registry.execute(tool_calls=tool_calls, parallel=True)

            self.context.add_messages([
                {
                    "role": "assistant",
                    "content": "",
                    "tool_calls": [
                        {
                            "id": tc.id,
                            "type": "function",
                            "function": {
                                "name": tc.name,
                                # arguments must be a JSON string, not a Python repr
                                "arguments": tc.arguments if isinstance(tc.arguments, str)
                                             else json.dumps(tc.arguments),
                            },
                        }
                        for tc in tool_calls
                    ]
                },
                *[{"role": "tool", "tool_call_id": tc.id, "content": result.content}
                  for tc, result in zip(tool_calls, results)]
            ])

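            # With tool results in context, stream the model's final answer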
            final = await self.llm.chat(messages=self.context.messages, stream=True)
            async for chunk in final:
                if chunk.content:
                    yield chunk.content
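
# --- Usage sketch ---
# A minimal, illustrative way to drive the agent directly with asyncio; in a
# real deployment the Atoms runtime owns this loop, so calling
# generate_response() by hand is an assumption for local testing only.
# (Assumes OPENAI_API_KEY is set in the environment for OpenAIClient.)
import asyncio

async def main():
    agent = SupportAgent()
    agent.context.add_message({"role": "user", "content": "Where is order ORD-12345?"})
    async for text in agent.generate_response():
        print(text, end="", flush=True)

if __name__ == "__main__":
    asyncio.run(main())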