Log in · Sign up
GitHub

Quick Start

This guide will walk you through creating your first AI agent with Lightfast Core. We'll build a simple assistant that can search the web and maintain conversation history.

Step 1: Create Your First Agent

Let's start by creating a basic agent without any tools:

// src/agents/assistant.ts
import { createAgent } from "lightfast/agent";
import { gateway } from "@ai-sdk/gateway";

// Minimal agent: a system prompt and a model, no tools yet.
export const assistantAgent = createAgent({
  name: "assistant",
  system: `You are a helpful AI assistant. You provide clear, 
    concise, and accurate responses to user questions.`,
  // Model is resolved through the AI gateway; the id is "provider/model".
  model: gateway("openai/gpt-5-nano"),
  tools: {},
  // Builds the per-request context object threaded into tools.
  // NOTE(review): resourceId is presumably the authenticated user's id — confirm at the call site.
  createRuntimeContext: ({ sessionId, resourceId }) => ({
    sessionId,
    userId: resourceId,
  }),
});

Step 2: Add a Tool

Now let's add a web search tool to make our agent more capable:

// src/tools/web-search.ts
import { createTool } from "lightfast/tool";
import { z } from "zod";

// Web-search tool exposed to the agent. Input is validated by zod.
// Fix: `limit` was declared and defaulted in the schema but never used by
// `execute`; the mock now returns exactly `limit` results so the advertised
// parameter actually affects the output.
export const webSearchTool = createTool({
  description: "Search the web for current information",
  inputSchema: z.object({
    query: z.string().describe("The search query"),
    limit: z.number().optional().default(5).describe("Maximum results"),
  }),
  execute: async ({ query, limit }, context) => {
    // In a real implementation, you'd call a search API
    console.log(`User ${context.userId} searching for: ${query}`);

    // Mock search results — one entry per requested result, capped by `limit`.
    return {
      results: Array.from({ length: limit }, () => ({
        title: "Example Result",
        url: "https://example.com",
        snippet: "This is a mock search result for: " + query,
      })),
    };
  },
});

Update your agent to use the tool:

// src/agents/assistant.ts
import { createAgent } from "lightfast/agent";
import { gateway } from "@ai-sdk/gateway";
import { webSearchTool } from "../tools/web-search";

// Same agent as Step 1, now wired to the web search tool.
export const assistantAgent = createAgent({
  name: "assistant",
  system: `You are a helpful AI assistant with web search capabilities.
    When users ask about current events or need up-to-date information,
    use the web search tool to find relevant information.`,
  model: gateway("openai/gpt-5-nano"),
  // The map key ("webSearch") is the tool id the agent invokes it by.
  tools: {
    webSearch: webSearchTool,
  },
  // Per-request context handed to every tool's `context` argument.
  createRuntimeContext: ({ sessionId, resourceId }) => ({
    sessionId,
    userId: resourceId,
  }),
});

Step 3: Create an API Route

Set up an API route to handle agent requests:

// app/api/chat/route.ts (Next.js App Router)
import { fetchRequestHandler } from "lightfast/agent/handlers";
import { RedisMemory } from "lightfast/agent/memory/adapters/redis";
import { assistantAgent } from "@/agents/assistant";

// Initialize memory adapter
// Upstash-style Redis store used to persist conversation history per session.
// NOTE(review): the `!` assertions assume both env vars are set at boot —
// consider validating them explicitly so misconfiguration fails loudly.
const memory = new RedisMemory({
  url: process.env.KV_REST_API_URL!,
  token: process.env.KV_REST_API_TOKEN!,
});

// POST /api/chat — run one chat turn through the agent.
// Fix: the file path shown (app/api/chat/route.ts) has no [sessionId]
// segment, so Next.js supplies no sessionId param and the original
// `{ params: { sessionId: string } }` type was a lie. The context is now
// typed as optional and accessed defensively; either move this file to
// app/api/chat/[sessionId]/route.ts or rely on the fallback below.
export async function POST(
  req: Request,
  { params }: { params?: { sessionId?: string } } = {}
) {
  // In production, get this from your auth provider
  const userId = "user-123";

  return fetchRequestHandler({
    agent: assistantAgent,
    // `??` so only a missing id falls back, never other falsy values.
    sessionId: params?.sessionId ?? "default-session",
    memory,
    req,
    resourceId: userId,
  });
}

// GET /api/chat — resume a previously-started stream after a client
// disconnect. Mirrors POST but sets enableResume.
// Fix (same as POST): a non-dynamic route gets no sessionId param, so the
// context type is widened to optional and accessed defensively.
export async function GET(
  req: Request,
  { params }: { params?: { sessionId?: string } } = {}
) {
  // GET requests resume the stream if supported
  const userId = "user-123";

  return fetchRequestHandler({
    agent: assistantAgent,
    sessionId: params?.sessionId ?? "default-session",
    memory,
    req,
    resourceId: userId,
    enableResume: true,
  });
}

Step 4: Create a Chat Interface

Build a simple chat interface using the Vercel AI SDK React hooks:

// app/chat/page.tsx
"use client";

import { useChat } from "ai/react";

// Chat UI driven by the Vercel AI SDK's useChat hook.
// NOTE(review): assumes the "ai/react" useChat surface (input,
// handleInputChange, handleSubmit, isLoading); newer AI SDK versions export
// useChat from "@ai-sdk/react" with a different API — confirm the pinned version.
export default function ChatPage() {
  const { messages, input, handleInputChange, handleSubmit, isLoading } = 
    useChat({
      // Must match the route created in Step 3.
      api: "/api/chat",
    });

  return (
    <div className="flex flex-col h-screen max-w-2xl mx-auto p-4">
      {/* Scrollable message history */}
      <div className="flex-1 overflow-y-auto space-y-4 mb-4">
        {messages.map((message) => (
          <div
            key={message.id}
            className={`p-4 rounded-lg ${
              message.role === "user" 
                ? "bg-blue-100 ml-auto max-w-xs" 
                : "bg-gray-100 mr-auto max-w-md"
            }`}
          >
            <p className="text-sm font-semibold mb-1">
              {message.role === "user" ? "You" : "Assistant"}
            </p>
            <p>{message.content}</p>
          </div>
        ))}
        {/* Simple typing indicator while a response is streaming */}
        {isLoading && (
          <div className="bg-gray-100 p-4 rounded-lg mr-auto max-w-md">
            <p className="text-sm">Assistant is typing...</p>
          </div>
        )}
      </div>
      
      {/* Input row; disabled while a response is in flight */}
      <form onSubmit={handleSubmit} className="flex gap-2">
        <input
          value={input}
          onChange={handleInputChange}
          placeholder="Type your message..."
          className="flex-1 p-2 border rounded-lg"
          disabled={isLoading}
        />
        <button
          type="submit"
          disabled={isLoading}
          className="px-4 py-2 bg-blue-500 text-white rounded-lg 
            disabled:opacity-50"
        >
          Send
        </button>
      </form>
    </div>
  );
}

Step 5: Add Memory Persistence

The agent automatically persists conversation history. Let's explore how to work with memory:

// src/lib/chat-utils.ts
import { RedisMemory } from "lightfast/agent/memory/adapters/redis";

// Module-level Redis memory shared by the helpers below.
// NOTE(review): `!` assumes both env vars exist — validate at startup if possible.
const memory = new RedisMemory({
  url: process.env.KV_REST_API_URL!,
  token: process.env.KV_REST_API_TOKEN!,
});

// Get conversation history
// Get conversation history
// Fetches the full stored message list for one session from the memory adapter.
export async function getChatHistory(sessionId: string) {
  const history = await memory.getMessages(sessionId);
  return history;
}

// Clear conversation
// Clear conversation
// NOTE(review): intentionally unimplemented stub — as written it resolves to
// undefined without touching storage; the base Memory interface exposes no
// delete operation, so an adapter-specific extension is required.
export async function clearChat(sessionId: string) {
  // Implementation depends on your memory adapter
  // You might need to extend the base Memory interface
}

// Get all user sessions
// Get all user sessions
// NOTE(review): unimplemented stub — resolves to undefined. Listing sessions
// per user needs a secondary index (e.g. a per-user session set) that the
// memory adapter shown here does not provide out of the box.
export async function getUserSessions(userId: string) {
  // This would require additional implementation
  // based on your specific needs
}

Step 6: Add Advanced Features

Streaming Transformations

Add smooth streaming for better UX:

import { smoothStream } from "ai";

// Fragment: only the streaming-transform option is shown; merge this with the
// full agent config from Step 2 (name, system, model, tools, ...).
export const assistantAgent = createAgent({
  // ... other config
  // Emits the response word-by-word with a 25 ms delay for a smoother typing effect.
  experimental_transform: smoothStream({
    delayInMs: 25,
    chunking: "word",
  }),
});

Rate Limiting

Protect your API with rate limiting:

// app/api/chat/route.ts
import { arcjet, shield, slidingWindow } from "@vendor/security";

// Security client: shield (attack protection) plus a sliding-window rate limit.
// NOTE(review): "LIVE" presumably enforces decisions rather than only logging
// them — confirm against the security vendor's documentation.
const aj = arcjet({
  key: process.env.ARCJET_KEY!,
  rules: [
    shield({ mode: "LIVE" }),
    slidingWindow({ 
      mode: "LIVE", 
      max: 100, 
      interval: 3600 // 100 requests per hour
    }),
  ],
});

// Rate-limited entry point: reject over-limit callers with 429 before the
// agent ever runs.
export async function POST(req: Request) {
  // Check rate limit
  const decision = await aj.protect(req);
  if (decision.isDenied()) {
    return Response.json(
      { error: "Rate limit exceeded" },
      { status: 429 }
    );
  }
  
  // Continue with handler...
  // NOTE(review): fragment — as written this falls through and resolves to
  // undefined; splice in the fetchRequestHandler call from Step 3 here.
}

Error Handling

Add comprehensive error handling:

// Error-handling wrapper: agent-level failures are reported through the
// onError hook; anything the handler itself doesn't catch becomes a 500.
export async function POST(req: Request) {
  try {
    // Delegate the whole turn to the agent handler.
    const response = await fetchRequestHandler({
      agent: assistantAgent,
      sessionId: "session-123",
      memory,
      req,
      resourceId: "user-123",
      onError: ({ error }) => {
        console.error("Agent error:", error);
        // Send to error tracking service
      },
    });
    return response;
  } catch (err) {
    // Last-resort guard: never leak an unhandled rejection to the client.
    console.error("Unexpected error:", err);
    return Response.json({ error: "Internal server error" }, { status: 500 });
  }
}

Step 7: Deploy to Production

Environment Variables

Set up your production environment variables:

# .env.production
OPENAI_API_KEY=sk-...
KV_REST_API_URL=https://...upstash.io
KV_REST_API_TOKEN=...
ARCJET_KEY=...

Deploy to Vercel

# Install Vercel CLI
pnpm i -g vercel

# Deploy
vercel --prod

Complete Example

Here's a complete, production-ready example combining everything:

// app/api/agents/[agentId]/sessions/[sessionId]/route.ts
import { auth } from "@clerk/nextjs/server";
import { fetchRequestHandler } from "lightfast/agent/handlers";
import { RedisMemory } from "lightfast/agent/memory/adapters/redis";
import { createAgent } from "lightfast/agent";
import { gateway } from "@ai-sdk/gateway";
import { smoothStream } from "ai";
import { webSearchTool } from "@/tools/web-search";

// Create agent with all features
// Production agent: search tool, smoothed streaming, and usage logging.
const agent = createAgent({
  name: "assistant",
  system: `You are a helpful AI assistant with web search capabilities.`,
  model: gateway("anthropic/claude-4-sonnet"),
  tools: {
    webSearch: webSearchTool,
  },
  // Context handed to tools on every request.
  createRuntimeContext: ({ sessionId, resourceId }) => ({
    sessionId,
    userId: resourceId,
    timestamp: new Date().toISOString(), // request start time, ISO-8601 (UTC)
  }),
  // Word-by-word streaming with a small delay for a smoother typing effect.
  experimental_transform: smoothStream({
    delayInMs: 25,
    chunking: "word",
  }),
  // Runs once per completed response; useful for metering/observability.
  onFinish: (result) => {
    console.log("Conversation finished:", {
      usage: result.usage,
      finishReason: result.finishReason,
    });
  },
});

// Shared Redis-backed memory for session persistence.
// NOTE(review): `!` assumes both env vars are present — validate at startup if possible.
const memory = new RedisMemory({
  url: process.env.KV_REST_API_URL!,
  token: process.env.KV_REST_API_TOKEN!,
});

// POST /api/agents/[agentId]/sessions/[sessionId] — authenticated chat turn.
// NOTE(review): in Next.js 15+ `params` is a Promise and must be awaited;
// this signature targets the earlier synchronous params shape — confirm the
// framework version before copying.
export async function POST(
  req: Request,
  { params }: { params: { agentId: string; sessionId: string } }
) {
  // Authenticate user
  const { userId } = await auth();
  if (!userId) {
    return Response.json({ error: "Unauthorized" }, { status: 401 });
  }

  // Validate agent
  // Only one agent id is served by this route; anything else is a 404.
  if (params.agentId !== "assistant") {
    return Response.json({ error: "Agent not found" }, { status: 404 });
  }

  return fetchRequestHandler({
    agent,
    sessionId: params.sessionId,
    memory,
    req,
    resourceId: userId,
    enableResume: true,
    // Request-scoped metadata (client fingerprint) made available to the run.
    createRequestContext: (req) => ({
      userAgent: req.headers.get("user-agent") || undefined,
      ipAddress: req.headers.get("x-forwarded-for") || undefined,
    }),
    onError: ({ error }) => {
      console.error(`Error in session ${params.sessionId}:`, error);
    },
  });
}

What's Next?

Congratulations! You've built your first AI agent with Lightfast Core. Here's what to explore next: