routes.vercel_ai
Vercel AI SDK routes for agent-runtimes server.
TerminateRequest Objects
class TerminateRequest(BaseModel)
Request to terminate a running Vercel AI request.
TerminateResponse Objects
class TerminateResponse(BaseModel)
Response from terminate request.
register_request
def register_request(request_id: str) -> asyncio.Event
Register a running request and return its cancellation event.
Arguments:
request_id - The unique request identifier.
Returns:
An asyncio.Event that can be set to signal cancellation.
unregister_request
def unregister_request(request_id: str) -> None
Unregister a request when it completes.
Arguments:
request_id - The request identifier to remove.
cancel_request
def cancel_request(request_id: str) -> bool
Cancel a running request.
Arguments:
request_id - The request identifier to cancel.
Returns:
True if the request was found and cancelled, False otherwise.
cancel_all_requests
def cancel_all_requests() -> int
Cancel all running requests.
Returns:
Number of requests cancelled.
register_vercel_agent
def register_vercel_agent(agent_id: str, adapter: VercelAITransport) -> None
Register a Vercel AI adapter.
Arguments:
agent_id - Unique identifier for the agent.
adapter - The VercelAITransport instance.
unregister_vercel_agent
def unregister_vercel_agent(agent_id: str) -> None
Unregister a Vercel AI adapter.
Arguments:
agent_id - The agent identifier.
get_vercel_adapter
def get_vercel_adapter(agent_id: str) -> VercelAITransport | None
Get a Vercel AI adapter by ID.
Arguments:
agent_id - The agent identifier.
Returns:
The VercelAITransport if found, None otherwise.
list_agents
@router.get("/agents")
async def list_agents() -> dict[str, Any]
List available Vercel AI agents.
Returns:
Dictionary with list of agent IDs.
terminate_agent
@router.post("/terminate", response_model=TerminateResponse)
async def terminate_agent(request: TerminateRequest) -> TerminateResponse
Terminate a running Vercel AI request or all requests.
This endpoint allows clients to stop running agent executions. If request_id is provided, only that request is cancelled. If request_id is None, all running requests are cancelled.
Arguments:
request - Terminate request with optional request_id.
Returns:
Result of the termination request.
list_requests
@router.get("/requests")
async def list_requests() -> dict[str, Any]
List all running Vercel AI requests.
Returns:
Dictionary with list of running request IDs.
chat
@router.post("/{agent_id:path}")
async def chat(request: Request, agent_id: str) -> Response
Handle Vercel AI SDK chat requests.
This endpoint implements the Vercel AI SDK streaming protocol, providing:
- Streaming chat responses
- Tool call support
- Token usage tracking
- Standard message format
The model can be specified in the request body to override the agent's default.
Arguments:
request - The FastAPI/Starlette request.
agent_id - The agent to use (defaults to "demo-agent").
Returns:
Streaming response compatible with Vercel AI SDK.
Example:
```javascript
// Client-side with Vercel AI SDK
import { useChat } from 'ai/react';
const { messages, input, handleInputChange, handleSubmit } = useChat({
api: '/api/v1/vercel-ai/chat',
});
```