A toolkit for building LLM-powered applications and agent loops.
$ uv add ai
Very small
Less framework to get in your way
Async all the way down
Helps you build smooth UX
With all backends in mind
Long-running, serverless, or durable
An agent loop you can read
Primitives for streaming, tool dispatch, and loop execution control, joined together using (mostly) plain Python.
class CustomAgent(ai.Agent): async def loop(self, context: ai.Context): while context.keep_running(): async with ( ai.stream(context=context) as stream, ai.ToolRunner() as tool_runner, ): async for event in ai.util.merge(stream, tool_runner.events()): yield event if isinstance(event, ai.events.ToolEnd): tool_runner.schedule(context.resolve(event.tool_call)) context.add(stream.message) context.add(tool_runner.get_tool_message())
Only essentials
Build more AI apps with less framework.
async with ai.stream(model, [ai.user_message("Hello!")]) as s: async for event in s: print(event)async with agent.run(model, [ai.user_message("Robot uprising?")]) as s: async for event in s: print(event)
class CustomAgent(ai.Agent): async def loop(self, context: ai.Context): while context.keep_running(): async with ( ai.stream(context=context) as stream, ai.ToolRunner() as tool_runner, ): async for event in ai.util.merge(stream, tool_runner.events()): yield event if isinstance(event, ai.events.ToolEnd): tool_runner.schedule(context.resolve(event.tool_call)) context.add(stream.message) context.add(tool_runner.get_tool_message())
agent.py
async with ai.stream(model, [ai.user_message("Hello!")]) as s: async for event in s: print(event)async with agent.run(model, [ai.user_message("Robot uprising?")]) as s: async for event in s: print(event)